// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include <drm/drmP.h>

#include "vmwgfx_drv.h"


/**
35
 * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
36
 *
37 38
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
39
 * @placement:  The placement to pin it.
40 41
 * @interruptible:  Use interruptible wait.
 *
42 43 44
 * Returns
 *  -ERESTARTSYS if interrupted by a signal.
 */
45 46 47 48
int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct ttm_placement *placement,
				bool interruptible)
49
{
50
	struct ttm_operation_ctx ctx = {interruptible, false };
51 52
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
53
	uint32_t new_flags;
54

55
	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
56 57 58
	if (unlikely(ret != 0))
		return ret;

59
	vmw_execbuf_release_pinned_bo(dev_priv);
T
Thomas Hellstrom 已提交
60

61
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
62 63 64
	if (unlikely(ret != 0))
		goto err;

65 66 67 68
	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
69
		ret = ttm_bo_validate(bo, placement, &ctx);
70

71 72
	if (!ret)
		vmw_bo_pin_reserved(buf, true);
73 74 75 76

	ttm_bo_unreserve(bo);

err:
77
	ttm_write_unlock(&dev_priv->reservation_sem);
78 79 80 81
	return ret;
}

/**
82
 * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
83
 *
84 85
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
86 87 88 89 90 91 92 93 94
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @pin:  Pin buffer if true.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
95 96 97
int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible)
98
{
99
	struct ttm_operation_ctx ctx = {interruptible, false };
100 101
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
102
	uint32_t new_flags;
103

104
	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
105 106 107
	if (unlikely(ret != 0))
		return ret;

108
	vmw_execbuf_release_pinned_bo(dev_priv);
T
Thomas Hellstrom 已提交
109

110
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
111 112 113
	if (unlikely(ret != 0))
		goto err;

114 115 116 117 118 119
	if (buf->pin_count > 0) {
		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
		goto out_unreserve;
	}

120
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
121
	if (likely(ret == 0) || ret == -ERESTARTSYS)
122
		goto out_unreserve;
123

124
	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
125

126 127 128
out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);
129 130 131

	ttm_bo_unreserve(bo);
err:
132
	ttm_write_unlock(&dev_priv->reservation_sem);
133 134 135 136
	return ret;
}

/**
137
 * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
138
 *
139 140
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
141 142 143 144 145 146 147 148
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
149 150 151
int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *buf,
			   bool interruptible)
152
{
153 154
	return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
					   interruptible);
155 156 157
}

/**
158
 * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
159
 *
160 161
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
162 163
 *
 * @dev_priv:  Driver private.
164
 * @buf:  DMA buffer to pin.
165 166 167 168 169
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
170 171 172
int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
				    struct vmw_dma_buffer *buf,
				    bool interruptible)
173
{
174
	struct ttm_operation_ctx ctx = {interruptible, false };
175 176
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
177
	struct ttm_place place;
178
	int ret = 0;
179
	uint32_t new_flags;
180

181
	place = vmw_vram_placement.placement[0];
182 183 184 185 186
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;
187

188
	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
189 190 191
	if (unlikely(ret != 0))
		return ret;

192
	vmw_execbuf_release_pinned_bo(dev_priv);
193
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
194 195 196
	if (unlikely(ret != 0))
		goto err_unlock;

197 198 199 200 201
	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
202 203
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
204
	    bo->mem.start > 0 &&
205 206 207 208
	    buf->pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}
209

210 211 212 213
	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
214
		ret = ttm_bo_validate(bo, &placement, &ctx);
215

216
	/* For some reason we didn't end up at the start of vram */
217
	WARN_ON(ret == 0 && bo->offset != 0);
218 219
	if (!ret)
		vmw_bo_pin_reserved(buf, true);
220 221 222

	ttm_bo_unreserve(bo);
err_unlock:
223
	ttm_write_unlock(&dev_priv->reservation_sem);
224 225 226 227 228

	return ret;
}

/**
229
 * vmw_dmabuf_unpin - Unpin the buffer given buffer, does not move the buffer.
230
 *
231
 * This function takes the reservation_sem in write mode.
232 233 234 235 236 237 238 239 240 241 242 243
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
		     struct vmw_dma_buffer *buf,
		     bool interruptible)
{
244 245
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
246

247 248 249 250
	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

251
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
252 253 254 255 256 257 258 259 260 261 262
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
263

264
/**
265 266
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
267
 *
268 269
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
270
 */
271 272
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
273
{
274
	if (bo->mem.mem_type == TTM_PL_VRAM) {
275
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
276
		ptr->offset = bo->offset;
277
	} else {
278
		ptr->gmrId = bo->mem.start;
279 280 281
		ptr->offset = 0;
	}
}
T
Thomas Hellstrom 已提交
282 283 284


/**
285
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
T
Thomas Hellstrom 已提交
286
 *
287
 * @vbo: The buffer object. Must be reserved.
T
Thomas Hellstrom 已提交
288 289 290
 * @pin: Whether to pin or unpin.
 *
 */
291
void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
T
Thomas Hellstrom 已提交
292
{
293
	struct ttm_operation_ctx ctx = { false, true };
294
	struct ttm_place pl;
T
Thomas Hellstrom 已提交
295
	struct ttm_placement placement;
296
	struct ttm_buffer_object *bo = &vbo->base;
T
Thomas Hellstrom 已提交
297 298 299
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

300
	lockdep_assert_held(&bo->resv->lock.base);
T
Thomas Hellstrom 已提交
301

302 303 304 305 306 307 308 309 310
	if (pin) {
		if (vbo->pin_count++ > 0)
			return;
	} else {
		WARN_ON(vbo->pin_count <= 0);
		if (--vbo->pin_count > 0)
			return;
	}

311 312 313
	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
314
		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
T
Thomas Hellstrom 已提交
315
	if (pin)
316
		pl.flags |= TTM_PL_FLAG_NO_EVICT;
T
Thomas Hellstrom 已提交
317 318 319

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
320
	placement.placement = &pl;
T
Thomas Hellstrom 已提交
321

322
	ret = ttm_bo_validate(bo, &placement, &ctx);
T
Thomas Hellstrom 已提交
323 324 325

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376


/*
 * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_dma_buffer_map_and_cache().
 */
void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}


/*
 * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 *
 */
void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}