/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20150810"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 9
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
#define VMW_PL_MOB TTM_PL_PRIV1
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	struct list_head fence_events;
	bool gb_aware;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
	s32 pin_count;
	/* Not ref-counted.  Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	unsigned long pin_count; /* Protected by resource reserved */
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	uint32_t array_size;
	/* TODO: so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
	struct list_head view_list;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
	bool dx;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};


struct vmw_sw_context{
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct vmw_fpriv *fp;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	struct list_head ctx_resource_list; /* For contexts and cotables */
	struct vmw_dma_buffer *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct vmw_resource_val_node *dx_ctx_node;
	struct vmw_dma_buffer *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};


/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size:           Size of the table (page-aligned).
 * @page_table:     Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	u32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool has_dx;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by irq_lock */
	int error_waiters; /* Protected by irq_lock */
	atomic_t fifo_queue_waiters;
	uint32_t last_read_seqno;
	spinlock_t irq_lock;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;
	spinlock_t svga_lock;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;
	bool refuse_hibernation;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_dma_buffer *dummy_query_bo;
	struct vmw_dma_buffer *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read- and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	unsigned long irq_flags;
	u32 val;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);

	return val;
}
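
/*
 * Illustrative sketch only: because vmw_read() and vmw_write() each take and
 * release the hw lock internally, a read-modify-write sequence like the one
 * below consists of two independently locked accesses rather than one atomic
 * register update. SVGA_REG_IRQMASK is assumed to be provided by the SVGA
 * device register headers.
 */
static inline void vmw_irqmask_or_example(struct vmw_private *dev_priv,
					  uint32_t flag)
{
	uint32_t mask = vmw_read(dev_priv, SVGA_REG_IRQMASK);

	vmw_write(dev_priv, SVGA_REG_IRQMASK, mask | flag);
}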

extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t size,
				 bool shareable,
				 uint32_t *handle,
				 struct vmw_dma_buffer **p_dma_buf);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
				     struct vmw_dma_buffer *dma_buf,
				     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
					 struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool switch_backup,
				   struct vmw_dma_buffer *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo,
				       struct ttm_placement *placement,
				       bool interruptible);
extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible);
extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
					 struct vmw_dma_buffer *buf,
					 bool interruptible);
extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
					   struct vmw_dma_buffer *bo,
					   bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
			    struct vmw_dma_buffer *bo,
			    bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
				  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
			  bool interruptible);
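
/*
 * Illustrative sketch only: the typical reserve / fill / commit pattern for
 * submitting a command through the FIFO. vmw_fifo_reserve() returns NULL on
 * failure. SVGA_CMD_UPDATE and SVGAFifoCmdUpdate are assumed to be provided
 * by the SVGA device headers.
 */
static inline int vmw_fifo_update_example(struct vmw_private *dev_priv,
					  u32 x, u32 y, u32 w, u32 h)
{
	struct {
		u32 header;
		SVGAFifoCmdUpdate body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header = SVGA_CMD_UPDATE;
	cmd->body.x = x;
	cmd->body.y = y;
	cmd->body.width = w;
	cmd->body.height = h;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}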

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
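
/*
 * Illustrative sketch only: walking all pages of a buffer object with the
 * page iterator above. This assumes the buffer object has a valid DMA
 * mapping (for example one set up through vmw_bo_map_dma()) and that
 * vmw_piter_next() advances to the first page on its first call after
 * vmw_piter_start().
 */
static inline void vmw_piter_walk_example(struct ttm_buffer_object *bo)
{
	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
	struct vmw_piter viter;

	vmw_piter_start(&viter, vsgt, 0);
	while (vmw_piter_next(&viter)) {
		dma_addr_t addr = vmw_piter_dma_addr(&viter);

		/* A caller would program @addr into a device page table here. */
		(void) addr;
	}
}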

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
			     struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob);


/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
				uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
				struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
int vmw_enable_vblank(struct drm_device *dev, int crtc);
void vmw_disable_vblank(struct drm_device *dev, int crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory OBject (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_dma_buffer *mob);
extern struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       uint32_t svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       struct vmw_surface **srf_out);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
				    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);


/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}
#endif