/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 *
 */

#ifndef HABANALABSP_H_
#define HABANALABSP_H_

#include "../include/common/cpucp_if.h"
#include "../include/common/qman_if.h"
#include <uapi/misc/habanalabs.h>

#include <linux/cdev.h>
#include <linux/iopoll.h>
#include <linux/irqreturn.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/hashtable.h>
#include <linux/bitfield.h>

#define HL_NAME				"habanalabs"

/* Use upper bits of mmap offset to store habana driver specific information.
 * bits[63:62] - Encode mmap type
 * bits[45:0]  - mmap offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 *  defines are w.r.t. PAGE_SIZE
 */
#define HL_MMAP_TYPE_SHIFT		(62 - PAGE_SHIFT)
#define HL_MMAP_TYPE_MASK		(0x3ull << HL_MMAP_TYPE_SHIFT)
#define HL_MMAP_TYPE_CB			(0x2ull << HL_MMAP_TYPE_SHIFT)

#define HL_MMAP_OFFSET_VALUE_MASK	(0x3FFFFFFFFFFFull >> PAGE_SHIFT)
#define HL_MMAP_OFFSET_VALUE_GET(off)	(off & HL_MMAP_OFFSET_VALUE_MASK)
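
/*
 * Illustrative sketch (not a driver API): a kernel-side mmap handler can
 * classify the request and recover the object handle from vma->vm_pgoff
 * using the masks above. Variable and helper names here are hypothetical.
 *
 *	u64 pgoff = vma->vm_pgoff;
 *	u64 handle = HL_MMAP_OFFSET_VALUE_GET(pgoff);
 *
 *	if ((pgoff & HL_MMAP_TYPE_MASK) == HL_MMAP_TYPE_CB)
 *		rc = map_cb_to_user(hpriv, handle, vma);
 */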

#define HL_PENDING_RESET_PER_SEC	30

#define HL_HARD_RESET_MAX_TIMEOUT	120

#define HL_DEVICE_TIMEOUT_USEC		1000000 /* 1 s */

#define HL_HEARTBEAT_PER_USEC		5000000 /* 5 s */

#define HL_PLL_LOW_JOB_FREQ_USEC	5000000 /* 5 s */

#define HL_CPUCP_INFO_TIMEOUT_USEC	10000000 /* 10s */
#define HL_CPUCP_EEPROM_TIMEOUT_USEC	10000000 /* 10s */

#define HL_PCI_ELBI_TIMEOUT_MSEC	10 /* 10ms */

#define HL_SIM_MAX_TIMEOUT_US		10000000 /* 10s */

#define HL_IDLE_BUSY_TS_ARR_SIZE	4096

/* Memory */
#define MEM_HASH_TABLE_BITS		7 /* 1 << 7 buckets */

/* MMU */
#define MMU_HASH_TABLE_BITS		7 /* 1 << 7 buckets */

/*
 * HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream
 * HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream
 */
#define HL_RSVD_SOBS			4
#define HL_RSVD_MONS			2

#define HL_RSVD_SOBS_IN_USE		2
#define HL_RSVD_MONS_IN_USE		1

#define HL_MAX_SOB_VAL			(1 << 15)

#define IS_POWER_OF_2(n)		(n != 0 && ((n & (n - 1)) == 0))
#define IS_MAX_PENDING_CS_VALID(n)	(IS_POWER_OF_2(n) && (n > 1))

#define HL_PCI_NUM_BARS			6

#define HL_MAX_DCORES			4

/**
 * struct pgt_info - MMU hop page info.
 * @node: hash linked-list node for the shadow hash of pgts.
 * @phys_addr: physical address of the pgt.
 * @shadow_addr: shadow hop in the host.
 * @ctx: pointer to the owner ctx.
 * @num_of_ptes: indicates how many ptes are used in the pgt.
 *
 * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop)
 * is needed during mapping, a new page is allocated and this structure holds
 * its essential information. During unmapping, if no valid PTEs remained in the
 * page, it is freed with its pgt_info structure.
 */
struct pgt_info {
	struct hlist_node	node;
	u64			phys_addr;
	u64			shadow_addr;
	struct hl_ctx		*ctx;
	int			num_of_ptes;
};

struct hl_device;
struct hl_fpriv;

/**
 * enum hl_pci_match_mode - pci match mode per region
 * @PCI_ADDRESS_MATCH_MODE: address match mode
 * @PCI_BAR_MATCH_MODE: bar match mode
 */
enum hl_pci_match_mode {
	PCI_ADDRESS_MATCH_MODE,
	PCI_BAR_MATCH_MODE
};

/**
 * enum hl_fw_component - F/W components to read version through registers.
 * @FW_COMP_UBOOT: u-boot.
 * @FW_COMP_PREBOOT: preboot.
 */
enum hl_fw_component {
	FW_COMP_UBOOT,
	FW_COMP_PREBOOT
};

/**
 * enum hl_queue_type - Supported QUEUE types.
 * @QUEUE_TYPE_NA: queue is not available.
 * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the
 *                  host.
 * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
 *			memories and/or operates the compute engines.
 * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
 * @QUEUE_TYPE_HW: queue of DMA and compute engines jobs, for which completion
 *                 notifications are sent by H/W.
 */
enum hl_queue_type {
	QUEUE_TYPE_NA,
	QUEUE_TYPE_EXT,
	QUEUE_TYPE_INT,
	QUEUE_TYPE_CPU,
	QUEUE_TYPE_HW
};

enum hl_cs_type {
	CS_TYPE_DEFAULT,
	CS_TYPE_SIGNAL,
	CS_TYPE_WAIT
};

/*
 * struct hl_inbound_pci_region - inbound region descriptor
 * @mode: pci match mode for this region
 * @addr: region target address
 * @size: region size in bytes
 * @offset_in_bar: offset within bar (address match mode)
 * @bar: bar id
 */
struct hl_inbound_pci_region {
	enum hl_pci_match_mode	mode;
	u64			addr;
	u64			size;
	u64			offset_in_bar;
	u8			bar;
};

/*
 * struct hl_outbound_pci_region - outbound region descriptor
 * @addr: region target address
 * @size: region size in bytes
 */
struct hl_outbound_pci_region {
	u64	addr;
	u64	size;
};

/*
 * struct hl_hw_sob - H/W SOB info.
 * @hdev: habanalabs device structure.
 * @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
 * @sob_id: id of this SOB.
 * @q_idx: the H/W queue that uses this SOB.
 */
struct hl_hw_sob {
	struct hl_device	*hdev;
	struct kref		kref;
	u32			sob_id;
	u32			q_idx;
};

/**
 * struct hw_queue_properties - queue information.
 * @type: queue type.
 * @driver_only: true if only the driver is allowed to send a job to this queue,
 *               false otherwise.
 * @requires_kernel_cb: true if a CB handle must be provided for jobs on this
 *                      queue, false otherwise (a CB address must be provided).
 * @supports_sync_stream: True if queue supports sync stream
 */
struct hw_queue_properties {
	enum hl_queue_type	type;
	u8			driver_only;
	u8			requires_kernel_cb;
	u8			supports_sync_stream;
};

/**
 * enum vm_type_t - virtual memory mapping request information.
 * @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
 * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
 */
enum vm_type_t {
	VM_TYPE_USERPTR = 0x1,
	VM_TYPE_PHYS_PACK = 0x2
};

/**
 * enum hl_device_hw_state - H/W device state. Use this to decide whether a
 *                           reset is needed before running hw_init.
 * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset
 * @HL_DEVICE_HW_STATE_DIRTY: H/W state is dirty. i.e. we started to execute
 *                            hw_init
 */
enum hl_device_hw_state {
	HL_DEVICE_HW_STATE_CLEAN = 0,
	HL_DEVICE_HW_STATE_DIRTY
};

/**
 * struct hl_mmu_properties - ASIC specific MMU address translation properties.
 * @start_addr: virtual start address of the memory region.
 * @end_addr: virtual end address of the memory region.
 * @hop0_shift: shift of hop 0 mask.
 * @hop1_shift: shift of hop 1 mask.
 * @hop2_shift: shift of hop 2 mask.
 * @hop3_shift: shift of hop 3 mask.
 * @hop4_shift: shift of hop 4 mask.
 * @hop5_shift: shift of hop 5 mask.
 * @hop0_mask: mask to get the PTE address in hop 0.
 * @hop1_mask: mask to get the PTE address in hop 1.
 * @hop2_mask: mask to get the PTE address in hop 2.
 * @hop3_mask: mask to get the PTE address in hop 3.
 * @hop4_mask: mask to get the PTE address in hop 4.
 * @hop5_mask: mask to get the PTE address in hop 5.
 * @page_size: default page size used to allocate memory.
 * @num_hops: The amount of hops supported by the translation table.
 */
struct hl_mmu_properties {
	u64	start_addr;
	u64	end_addr;
	u64	hop0_shift;
	u64	hop1_shift;
	u64	hop2_shift;
	u64	hop3_shift;
	u64	hop4_shift;
	u64	hop5_shift;
	u64	hop0_mask;
	u64	hop1_mask;
	u64	hop2_mask;
	u64	hop3_mask;
	u64	hop4_mask;
	u64	hop5_mask;
	u32	page_size;
	u32	num_hops;
};
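
/*
 * Illustrative note: for a given hop, the PTE used to translate a virtual
 * address is located by masking and shifting the address with the per-hop
 * values above, e.g. (sketch only, not a driver function):
 *
 *	pte_addr = hop_addr + mmu_pte_size * ((virt_addr & hop_mask) >> hop_shift);
 *
 * where hop_addr is the physical (or shadow) address of that hop's table.
 */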

/**
 * struct asic_fixed_properties - ASIC specific immutable properties.
 * @hw_queues_props: H/W queues properties.
 * @cpucp_info: received various information from CPU-CP regarding the H/W, e.g.
 *		available sensors.
 * @uboot_ver: F/W U-boot version.
 * @preboot_ver: F/W Preboot version.
 * @dmmu: DRAM MMU address translation properties.
 * @pmmu: PCI (host) MMU address translation properties.
 * @pmmu_huge: PCI (host) MMU address translation properties for memory
 *              allocated with huge pages.
 * @sram_base_address: SRAM physical start address.
 * @sram_end_address: SRAM physical end address.
 * @sram_user_base_address: SRAM physical start address for user access.
 * @dram_base_address: DRAM physical start address.
 * @dram_end_address: DRAM physical end address.
 * @dram_user_base_address: DRAM physical start address for user access.
 * @dram_size: DRAM total size.
 * @dram_pci_bar_size: size of PCI bar towards DRAM.
 * @max_power_default: max power of the device after reset
 * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
 *                                      fault.
 * @pcie_dbi_base_address: Base address of the PCIE_DBI block.
 * @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
 * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
 * @mmu_dram_default_page_addr: DRAM default page physical address.
 * @mmu_pgt_size: MMU page tables total size.
 * @mmu_pte_size: PTE size in MMU page tables.
 * @mmu_hop_table_size: MMU hop table size.
 * @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
 * @dram_page_size: page size for MMU DRAM allocation.
 * @cfg_size: configuration space size on SRAM.
 * @sram_size: total size of SRAM.
 * @max_asid: maximum number of open contexts (ASIDs).
 * @num_of_events: number of possible internal H/W IRQs.
 * @psoc_pci_pll_nr: PCI PLL NR value.
 * @psoc_pci_pll_nf: PCI PLL NF value.
 * @psoc_pci_pll_od: PCI PLL OD value.
 * @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
 * @psoc_timestamp_frequency: frequency of the psoc timestamp clock.
 * @high_pll: high PLL frequency used by the device.
 * @cb_pool_cb_cnt: number of CBs in the CB pool.
 * @cb_pool_cb_size: size of each CB in the CB pool.
 * @max_pending_cs: maximum of concurrent pending command submissions
 * @max_queues: maximum amount of queues in the system
 * @sync_stream_first_sob: first sync object available for sync stream use
 * @sync_stream_first_mon: first monitor available for sync stream use
 * @first_available_user_sob: first sob available for the user
 * @first_available_user_mon: first monitor available for the user
 * @tpc_enabled_mask: which TPCs are enabled.
 * @completion_queues_count: number of completion queues.
 */
struct asic_fixed_properties {
	struct hw_queue_properties	*hw_queues_props;
	struct cpucp_info		cpucp_info;
	char				uboot_ver[VERSION_MAX_LEN];
	char				preboot_ver[VERSION_MAX_LEN];
	struct hl_mmu_properties	dmmu;
	struct hl_mmu_properties	pmmu;
	struct hl_mmu_properties	pmmu_huge;
	u64				sram_base_address;
	u64				sram_end_address;
	u64				sram_user_base_address;
	u64				dram_base_address;
	u64				dram_end_address;
	u64				dram_user_base_address;
	u64				dram_size;
	u64				dram_pci_bar_size;
	u64				max_power_default;
	u64				dram_size_for_default_page_mapping;
	u64				pcie_dbi_base_address;
	u64				pcie_aux_dbi_reg_addr;
	u64				mmu_pgt_addr;
	u64				mmu_dram_default_page_addr;
	u32				mmu_pgt_size;
	u32				mmu_pte_size;
	u32				mmu_hop_table_size;
	u32				mmu_hop0_tables_total_size;
	u32				dram_page_size;
	u32				cfg_size;
	u32				sram_size;
	u32				max_asid;
	u32				num_of_events;
	u32				psoc_pci_pll_nr;
	u32				psoc_pci_pll_nf;
	u32				psoc_pci_pll_od;
	u32				psoc_pci_pll_div_factor;
	u32				psoc_timestamp_frequency;
	u32				high_pll;
	u32				cb_pool_cb_cnt;
	u32				cb_pool_cb_size;
	u32				max_pending_cs;
	u32				max_queues;
	u16				sync_stream_first_sob;
	u16				sync_stream_first_mon;
	u16				first_available_user_sob[HL_MAX_DCORES];
	u16				first_available_user_mon[HL_MAX_DCORES];
	u8				tpc_enabled_mask;
	u8				completion_queues_count;
};

/**
 * struct hl_fence - software synchronization primitive
 * @completion: fence is implemented using completion
 * @refcount: refcount for this fence
 * @error: mark this fence with error
 *
 */
struct hl_fence {
	struct completion	completion;
	struct kref		refcount;
	int			error;
};

/**
 * struct hl_cs_compl - command submission completion object.
 * @base_fence: hl fence object.
 * @lock: spinlock to protect fence.
 * @hdev: habanalabs device structure.
 * @hw_sob: the H/W SOB used in this signal/wait CS.
 * @cs_seq: command submission sequence number.
 * @type: type of the CS - signal/wait.
 * @sob_val: the SOB value that is used in this signal/wait CS.
 */
struct hl_cs_compl {
	struct hl_fence		base_fence;
	spinlock_t		lock;
	struct hl_device	*hdev;
	struct hl_hw_sob	*hw_sob;
	u64			cs_seq;
	enum hl_cs_type		type;
	u16			sob_val;
};

/*
 * Command Buffers
 */

/**
 * struct hl_cb_mgr - describes a Command Buffer Manager.
 * @cb_lock: protects cb_handles.
 * @cb_handles: an idr to hold all command buffer handles.
 */
struct hl_cb_mgr {
	spinlock_t		cb_lock;
	struct idr		cb_handles; /* protected by cb_lock */
};

/**
 * struct hl_cb - describes a Command Buffer.
 * @refcount: reference counter for usage of the CB.
 * @hdev: pointer to device this CB belongs to.
 * @lock: spinlock to protect mmap/cs flows.
 * @debugfs_list: node in debugfs list of command buffers.
 * @pool_list: node in pool list of command buffers.
 * @id: the CB's ID.
 * @kernel_address: Holds the CB's kernel virtual address.
 * @bus_address: Holds the CB's DMA address.
 * @mmap_size: Holds the CB's size that was mmaped.
 * @size: holds the CB's size.
 * @cs_cnt: holds number of CS that this CB participates in.
 * @ctx_id: holds the ID of the owner's context.
 * @mmap: true if the CB is currently mmaped to user.
 * @is_pool: true if CB was acquired from the pool, false otherwise.
 * @is_internal: internally allocated.
 */
struct hl_cb {
	struct kref		refcount;
	struct hl_device	*hdev;
	spinlock_t		lock;
	struct list_head	debugfs_list;
	struct list_head	pool_list;
	u64			id;
	u64			kernel_address;
	dma_addr_t		bus_address;
	u32			mmap_size;
	u32			size;
	u32			cs_cnt;
	u32			ctx_id;
	u8			mmap;
	u8			is_pool;
	u8			is_internal;
};


/*
 * QUEUES
 */

struct hl_cs_job;

/* Queue length of external and HW queues */
#define HL_QUEUE_LENGTH			4096
#define HL_QUEUE_SIZE_IN_BYTES		(HL_QUEUE_LENGTH * HL_BD_SIZE)

#if (HL_MAX_JOBS_PER_CS > HL_QUEUE_LENGTH)
#error "HL_QUEUE_LENGTH must be greater than HL_MAX_JOBS_PER_CS"
#endif

/* HL_CQ_LENGTH is in units of struct hl_cq_entry */
#define HL_CQ_LENGTH			HL_QUEUE_LENGTH
#define HL_CQ_SIZE_IN_BYTES		(HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)

/* Must be power of 2 */
#define HL_EQ_LENGTH			64
#define HL_EQ_SIZE_IN_BYTES		(HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)

/* Host <-> CPU-CP shared memory size */
#define HL_CPU_ACCESSIBLE_MEM_SIZE	SZ_2M

/**
 * struct hl_hw_queue - describes a H/W transport queue.
 * @hw_sob: array of the used H/W SOBs by this H/W queue.
 * @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
 * @queue_type: type of queue.
 * @kernel_address: holds the queue's kernel virtual address.
 * @bus_address: holds the queue's DMA address.
 * @pi: holds the queue's pi value.
 * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
 * @hw_queue_id: the id of the H/W queue.
 * @cq_id: the id for the corresponding CQ for this H/W queue.
 * @msi_vec: the IRQ number of the H/W queue.
 * @int_queue_len: length of internal queue (number of entries).
 * @next_sob_val: the next value to use for the currently used SOB.
 * @base_sob_id: the base SOB id of the SOBs used by this queue.
 * @base_mon_id: the base MON id of the MONs used by this queue.
 * @valid: is the queue valid (we have array of 32 queues, not all of them
 *         exist).
 * @curr_sob_offset: the id offset to the currently used SOB from the
 *                   HL_RSVD_SOBS that are being used by this queue.
 * @supports_sync_stream: True if queue supports sync stream
 */
struct hl_hw_queue {
	struct hl_hw_sob	hw_sob[HL_RSVD_SOBS];
	struct hl_cs_job	**shadow_queue;
	enum hl_queue_type	queue_type;
	u64			kernel_address;
	dma_addr_t		bus_address;
	u32			pi;
	atomic_t		ci;
	u32			hw_queue_id;
	u32			cq_id;
	u32			msi_vec;
	u16			int_queue_len;
	u16			next_sob_val;
	u16			base_sob_id;
	u16			base_mon_id;
	u8			valid;
	u8			curr_sob_offset;
	u8			supports_sync_stream;
};

/**
 * struct hl_cq - describes a completion queue
 * @hdev: pointer to the device structure
 * @kernel_address: holds the queue's kernel virtual address
 * @bus_address: holds the queue's DMA address
 * @cq_idx: completion queue index in array
 * @hw_queue_id: the id of the matching H/W queue
 * @ci: ci inside the queue
 * @pi: pi inside the queue
 * @free_slots_cnt: counter of free slots in queue
 */
struct hl_cq {
	struct hl_device	*hdev;
	u64			kernel_address;
	dma_addr_t		bus_address;
	u32			cq_idx;
	u32			hw_queue_id;
	u32			ci;
	u32			pi;
	atomic_t		free_slots_cnt;
};

/**
 * struct hl_eq - describes the event queue (single one per device)
 * @hdev: pointer to the device structure
 * @kernel_address: holds the queue's kernel virtual address
 * @bus_address: holds the queue's DMA address
 * @ci: ci inside the queue
 */
struct hl_eq {
	struct hl_device	*hdev;
	u64			kernel_address;
	dma_addr_t		bus_address;
	u32			ci;
};


/*
 * ASICs
 */

/**
 * enum hl_asic_type - supported ASIC types.
 * @ASIC_INVALID: Invalid ASIC type.
 * @ASIC_GOYA: Goya device.
 * @ASIC_GAUDI: Gaudi device.
 */
enum hl_asic_type {
	ASIC_INVALID,
	ASIC_GOYA,
	ASIC_GAUDI
};

struct hl_cs_parser;

/**
 * enum hl_pm_mng_profile - power management profile.
 * @PM_AUTO: internal clock is set by the Linux driver.
 * @PM_MANUAL: internal clock is set by the user.
 * @PM_LAST: last power management type.
 */
enum hl_pm_mng_profile {
	PM_AUTO = 1,
	PM_MANUAL,
	PM_LAST
};

/**
 * enum hl_pll_frequency - PLL frequency.
 * @PLL_HIGH: high frequency.
 * @PLL_LOW: low frequency.
 * @PLL_LAST: last frequency values that were configured by the user.
 */
enum hl_pll_frequency {
	PLL_HIGH = 1,
	PLL_LOW,
	PLL_LAST
};

#define PLL_REF_CLK 50

enum div_select_defs {
	DIV_SEL_REF_CLK = 0,
	DIV_SEL_PLL_CLK = 1,
	DIV_SEL_DIVIDED_REF = 2,
	DIV_SEL_DIVIDED_PLL = 3,
};

/**
 * struct hl_asic_funcs - ASIC specific functions that can be called from
 *                        common code.
 * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
 * @early_fini: tears down what was done in early_init.
 * @late_init: sets up late driver/hw state (post hw_init) - Optional.
 * @late_fini: tears down what was done in late_init (pre hw_fini) - Optional.
 * @sw_init: sets up driver state, does not configure H/W.
 * @sw_fini: tears down driver state, does not configure H/W.
 * @hw_init: sets up the H/W state.
 * @hw_fini: tears down the H/W state.
 * @halt_engines: halt engines, needed for reset sequence. This also disables
 *                interrupts from the device. Should be called before
 *                hw_fini and before CS rollback.
 * @suspend: handles IP specific H/W or SW changes for suspend.
 * @resume: handles IP specific H/W or SW changes for resume.
 * @cb_mmap: maps a CB.
 * @ring_doorbell: increment PI on a given QMAN.
 * @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
 *             function because the PQs are located in different memory areas
 *             per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
 *             writing the PQE must match the destination memory area
 *             properties.
 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
 *                           dma_alloc_coherent(). This is ASIC function because
 *                           its implementation is not trivial when the driver
 *                           is loaded in simulation mode (not upstreamed).
 * @asic_dma_free_coherent:  Free coherent DMA memory by calling
 *                           dma_free_coherent(). This is ASIC function because
 *                           its implementation is not trivial when the driver
 *                           is loaded in simulation mode (not upstreamed).
 * @get_int_queue_base: get the internal queue base address.
 * @test_queues: run simple test on all queues for sanity check.
 * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
 *                        size of allocation is HL_DMA_POOL_BLK_SIZE.
 * @asic_dma_pool_free: free small DMA allocation from pool.
 * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
 * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
 * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
 * @cs_parser: parse Command Submission.
 * @asic_dma_map_sg: DMA map scatter-gather list.
 * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
 * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
 * @update_eq_ci: update event queue CI.
 * @context_switch: called upon ASID context switch.
 * @restore_phase_topology: clear all SOBs and MONs.
 * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM.
 * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM.
 * @add_device_attr: add ASIC specific device attributes.
 * @handle_eqe: handle event queue entry (IRQ) from CPU-CP.
 * @set_pll_profile: change PLL profile (manual/automatic).
 * @get_events_stat: retrieve event queue entries histogram.
 * @read_pte: read MMU page table entry from DRAM.
 * @write_pte: write MMU page table entry to DRAM.
 * @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
 *                        (L1 only) or hard (L0 & L1) flush.
 * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
 *                              ASID-VA-size mask.
 * @send_heartbeat: send is-alive packet to CPU-CP and verify response.
 * @set_clock_gating: enable/disable clock gating per engine according to
 *                    clock gating mask in hdev
 * @disable_clock_gating: disable clock gating completely
 * @debug_coresight: perform certain actions on Coresight for debugging.
 * @is_device_idle: return true if device is idle, false otherwise.
 * @soft_reset_late_init: perform certain actions needed after soft reset.
 * @hw_queues_lock: acquire H/W queues lock.
 * @hw_queues_unlock: release H/W queues lock.
 * @get_pci_id: retrieve PCI ID.
 * @get_eeprom_data: retrieve EEPROM data from F/W.
 * @send_cpu_message: send message to F/W. If the message times out, the
 *                    driver will eventually reset the device. The timeout can
 *                    be determined by the calling function or it can be 0 and
 *                    then the timeout is the default timeout for the specific
 *                    ASIC.
 * @get_hw_state: retrieve the H/W state
 * @pci_bars_map: Map PCI BARs.
 * @init_iatu: Initialize the iATU unit inside the PCI controller.
 * @rreg: Read a register. Needed for simulator support.
 * @wreg: Write a register. Needed for simulator support.
 * @halt_coresight: stop the ETF and ETR traces.
 * @ctx_init: context dependent initialization.
 * @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz
 * @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
 * @read_device_fw_version: read the device's firmware versions that are
 *                          contained in registers
 * @load_firmware_to_device: load the firmware to the device's memory
 * @load_boot_fit_to_device: load boot fit to device's memory
 * @get_signal_cb_size: Get signal CB size.
 * @get_wait_cb_size: Get wait CB size.
 * @gen_signal_cb: Generate a signal CB.
 * @gen_wait_cb: Generate a wait CB.
 * @reset_sob: Reset a SOB.
 * @set_dma_mask_from_fw: set the DMA mask in the driver according to the
 *                        firmware configuration
 * @get_device_time: Get the device time.
 */
struct hl_asic_funcs {
	int (*early_init)(struct hl_device *hdev);
	int (*early_fini)(struct hl_device *hdev);
	int (*late_init)(struct hl_device *hdev);
	void (*late_fini)(struct hl_device *hdev);
	int (*sw_init)(struct hl_device *hdev);
	int (*sw_fini)(struct hl_device *hdev);
	int (*hw_init)(struct hl_device *hdev);
	void (*hw_fini)(struct hl_device *hdev, bool hard_reset);
	void (*halt_engines)(struct hl_device *hdev, bool hard_reset);
	int (*suspend)(struct hl_device *hdev);
	int (*resume)(struct hl_device *hdev);
	int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size);
	void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
	void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
			struct hl_bd *bd);
	void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag);
	void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle);
	void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
				dma_addr_t *dma_handle, u16 *queue_len);
	int (*test_queues)(struct hl_device *hdev);
	void* (*asic_dma_pool_zalloc)(struct hl_device *hdev, size_t size,
				gfp_t mem_flags, dma_addr_t *dma_handle);
	void (*asic_dma_pool_free)(struct hl_device *hdev, void *vaddr,
				dma_addr_t dma_addr);
	void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
				size_t size, dma_addr_t *dma_handle);
	void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
				size_t size, void *vaddr);
	void (*hl_dma_unmap_sg)(struct hl_device *hdev,
				struct scatterlist *sgl, int nents,
				enum dma_data_direction dir);
	int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
	int (*asic_dma_map_sg)(struct hl_device *hdev,
				struct scatterlist *sgl, int nents,
				enum dma_data_direction dir);
	u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
					struct sg_table *sgt);
	void (*add_end_of_cb_packets)(struct hl_device *hdev,
					u64 kernel_address, u32 len,
					u64 cq_addr, u32 cq_val, u32 msix_num,
					bool eb);
	void (*update_eq_ci)(struct hl_device *hdev, u32 val);
	int (*context_switch)(struct hl_device *hdev, u32 asid);
	void (*restore_phase_topology)(struct hl_device *hdev);
	int (*debugfs_read32)(struct hl_device *hdev, u64 addr, u32 *val);
	int (*debugfs_write32)(struct hl_device *hdev, u64 addr, u32 val);
	int (*debugfs_read64)(struct hl_device *hdev, u64 addr, u64 *val);
	int (*debugfs_write64)(struct hl_device *hdev, u64 addr, u64 val);
	void (*add_device_attr)(struct hl_device *hdev,
				struct attribute_group *dev_attr_grp);
	void (*handle_eqe)(struct hl_device *hdev,
				struct hl_eq_entry *eq_entry);
	void (*set_pll_profile)(struct hl_device *hdev,
			enum hl_pll_frequency freq);
	void* (*get_events_stat)(struct hl_device *hdev, bool aggregate,
				u32 *size);
	u64 (*read_pte)(struct hl_device *hdev, u64 addr);
	void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
	int (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
					u32 flags);
	int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
			u32 asid, u64 va, u64 size);
	int (*send_heartbeat)(struct hl_device *hdev);
	void (*set_clock_gating)(struct hl_device *hdev);
	void (*disable_clock_gating)(struct hl_device *hdev);
	int (*debug_coresight)(struct hl_device *hdev, void *data);
	bool (*is_device_idle)(struct hl_device *hdev, u64 *mask,
				struct seq_file *s);
	int (*soft_reset_late_init)(struct hl_device *hdev);
	void (*hw_queues_lock)(struct hl_device *hdev);
	void (*hw_queues_unlock)(struct hl_device *hdev);
	u32 (*get_pci_id)(struct hl_device *hdev);
	int (*get_eeprom_data)(struct hl_device *hdev, void *data,
				size_t max_size);
	int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
				u16 len, u32 timeout, long *result);
	enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev);
	int (*pci_bars_map)(struct hl_device *hdev);
	int (*init_iatu)(struct hl_device *hdev);
	u32 (*rreg)(struct hl_device *hdev, u32 reg);
	void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
	void (*halt_coresight)(struct hl_device *hdev);
	int (*ctx_init)(struct hl_ctx *ctx);
	int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
	u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
	void (*read_device_fw_version)(struct hl_device *hdev,
					enum hl_fw_component fwc);
	int (*load_firmware_to_device)(struct hl_device *hdev);
	int (*load_boot_fit_to_device)(struct hl_device *hdev);
	u32 (*get_signal_cb_size)(struct hl_device *hdev);
	u32 (*get_wait_cb_size)(struct hl_device *hdev);
	void (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id);
	void (*gen_wait_cb)(struct hl_device *hdev, void *data, u16 sob_id,
				u16 sob_val, u16 mon_id, u32 q_idx);
	void (*reset_sob)(struct hl_device *hdev, void *data);
	void (*set_dma_mask_from_fw)(struct hl_device *hdev);
	u64 (*get_device_time)(struct hl_device *hdev);
};


/*
 * CONTEXTS
 */

#define HL_KERNEL_ASID_ID	0

/**
 * struct hl_va_range - virtual addresses range.
 * @lock: protects the virtual addresses list.
 * @list: list of virtual addresses blocks available for mappings.
 * @start_addr: range start address.
 * @end_addr: range end address.
 */
struct hl_va_range {
	struct mutex		lock;
	struct list_head	list;
	u64			start_addr;
	u64			end_addr;
};

/**
 * struct hl_ctx - user/kernel context.
 * @mem_hash: holds mapping from virtual address to virtual memory area
 *		descriptor (hl_vm_phys_pg_list or hl_userptr).
 * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
 * @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
 * @hdev: pointer to the device structure.
 * @refcount: reference counter for the context. Context is released only when
 *		this hits 0. It is incremented on CS and CS_WAIT.
 * @cs_pending: array of hl fence objects representing pending CS.
 * @host_va_range: holds available virtual addresses for host mappings.
 * @host_huge_va_range: holds available virtual addresses for host mappings
 *                      with huge pages.
 * @dram_va_range: holds available virtual addresses for DRAM mappings.
 * @mem_hash_lock: protects the mem_hash.
 * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
 *            MMU hash or walking the PGT requires taking this lock.
 * @debugfs_list: node in debugfs list of contexts.
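 * @cs_counters: context command submission counters.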
 * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
 *			to user so user could inquire about CS. It is used as
 *			index to cs_pending array.
 * @dram_default_hops: array that holds all hops addresses needed for default
 *                     DRAM mapping.
 * @cs_lock: spinlock to protect cs_sequence.
 * @dram_phys_mem: amount of used physical DRAM memory by this context.
 * @thread_ctx_switch_token: token to prevent multiple threads of the same
 *				context	from running the context switch phase.
 *				Only a single thread should run it.
 * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
 *				the context switch phase from moving to their
 *				execution phase before the context switch phase
 *				has finished.
 * @asid: context's unique address space ID in the device's MMU.
 * @handle: context's opaque handle for user
 */
struct hl_ctx {
	DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
	DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
	struct hl_fpriv		*hpriv;
	struct hl_device	*hdev;
	struct kref		refcount;
	struct hl_fence		**cs_pending;
	struct hl_va_range	*host_va_range;
	struct hl_va_range	*host_huge_va_range;
	struct hl_va_range	*dram_va_range;
	struct mutex		mem_hash_lock;
	struct mutex		mmu_lock;
	struct list_head	debugfs_list;
	struct hl_cs_counters	cs_counters;
	u64			cs_sequence;
	u64			*dram_default_hops;
	spinlock_t		cs_lock;
	atomic64_t		dram_phys_mem;
	atomic_t		thread_ctx_switch_token;
	u32			thread_ctx_switch_wait_token;
	u32			asid;
	u32			handle;
};

/**
 * struct hl_ctx_mgr - for handling multiple contexts.
 * @ctx_lock: protects ctx_handles.
 * @ctx_handles: idr to hold all ctx handles.
 */
struct hl_ctx_mgr {
	struct mutex		ctx_lock;
	struct idr		ctx_handles;
};



/*
 * COMMAND SUBMISSIONS
 */

/**
 * struct hl_userptr - memory mapping chunk information
 * @vm_type: type of the VM.
 * @job_node: linked-list node for hanging the object on the Job's list.
 * @vec: pointer to the frame vector.
 * @sgt: pointer to the scatter-gather table that holds the pages.
 * @dir: for DMA unmapping, the direction must be supplied, so save it.
 * @debugfs_list: node in debugfs list of userptr mappings.
 * @addr: user-space virtual address of the start of the memory area.
 * @size: size of the memory area to pin & map.
 * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
 */
struct hl_userptr {
	enum vm_type_t		vm_type; /* must be first */
	struct list_head	job_node;
	struct frame_vector	*vec;
	struct sg_table		*sgt;
	enum dma_data_direction dir;
	struct list_head	debugfs_list;
	u64			addr;
	u32			size;
	u8			dma_mapped;
};

/**
 * struct hl_cs - command submission.
 * @jobs_in_queue_cnt: per each queue, maintain counter of submitted jobs.
 * @ctx: the context this CS belongs to.
 * @job_list: list of the CS's jobs in the various queues.
 * @job_lock: spinlock for the CS's jobs list. Needed for free_job.
 * @refcount: reference counter for usage of the CS.
 * @fence: pointer to the fence object of this CS.
 * @signal_fence: pointer to the fence object of the signal CS (used by wait
 *                CS only).
 * @finish_work: workqueue object to run when CS is completed by H/W.
 * @work_tdr: delayed work node for TDR.
 * @mirror_node : node in device mirror list of command submissions.
 * @debugfs_list: node in debugfs list of command submissions.
 * @sequence: the sequence number of this CS.
 * @type: CS_TYPE_*.
 * @submitted: true if CS was submitted to H/W.
 * @completed: true if CS was completed by device.
 * @timedout : true if CS was timedout.
 * @tdr_active: true if TDR was activated for this CS (to prevent
 *		double TDR activation).
 * @aborted: true if CS was aborted due to some device error.
 */
struct hl_cs {
	u16			*jobs_in_queue_cnt;
	struct hl_ctx		*ctx;
	struct list_head	job_list;
	spinlock_t		job_lock;
	struct kref		refcount;
	struct hl_fence		*fence;
	struct hl_fence		*signal_fence;
	struct work_struct	finish_work;
	struct delayed_work	work_tdr;
	struct list_head	mirror_node;
	struct list_head	debugfs_list;
	u64			sequence;
	enum hl_cs_type		type;
	u8			submitted;
	u8			completed;
	u8			timedout;
	u8			tdr_active;
	u8			aborted;
};

/**
 * struct hl_cs_job - command submission job.
 * @cs_node: the node to hang on the CS jobs list.
 * @cs: the CS this job belongs to.
 * @user_cb: the CB we got from the user.
 * @patched_cb: in case of patching, this is internal CB which is submitted on
 *		the queue instead of the CB we got from the IOCTL.
 * @finish_work: workqueue object to run when job is completed.
 * @userptr_list: linked-list of userptr mappings that belong to this job and
 *			wait for completion.
 * @debugfs_list: node in debugfs list of command submission jobs.
 * @queue_type: the type of the H/W queue this job is submitted to.
 * @id: the id of this job inside a CS.
 * @hw_queue_id: the id of the H/W queue this job is submitted to.
 * @user_cb_size: the actual size of the CB we got from the user.
 * @job_cb_size: the actual size of the CB that we put on the queue.
 * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
 *                          handle to a kernel-allocated CB object, false
 *                          otherwise (SRAM/DRAM/host address).
 * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
 *                    info is needed later, when adding the 2xMSG_PROT at the
 *                    end of the JOB, to know which barriers to put in the
 *                    MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
 *                    have streams so the engine can't be busy by another
 *                    stream.
 */
struct hl_cs_job {
	struct list_head	cs_node;
	struct hl_cs		*cs;
	struct hl_cb		*user_cb;
	struct hl_cb		*patched_cb;
	struct work_struct	finish_work;
	struct list_head	userptr_list;
	struct list_head	debugfs_list;
	enum hl_queue_type	queue_type;
	u32			id;
	u32			hw_queue_id;
	u32			user_cb_size;
	u32			job_cb_size;
	u8			is_kernel_allocated_cb;
	u8			contains_dma_pkt;
};

/**
 * struct hl_cs_parser - command submission parser properties.
 * @user_cb: the CB we got from the user.
 * @patched_cb: in case of patching, this is internal CB which is submitted on
 *		the queue instead of the CB we got from the IOCTL.
 * @job_userptr_list: linked-list of userptr mappings that belong to the related
 *			job and wait for completion.
 * @cs_sequence: the sequence number of the related CS.
 * @queue_type: the type of the H/W queue this job is submitted to.
 * @ctx_id: the ID of the context the related CS belongs to.
 * @hw_queue_id: the id of the H/W queue this job is submitted to.
 * @user_cb_size: the actual size of the CB we got from the user.
 * @patched_cb_size: the size of the CB after parsing.
 * @job_id: the id of the related job inside the related CS.
 * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
 *                          handle to a kernel-allocated CB object, false
 *                          otherwise (SRAM/DRAM/host address).
 * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
 *                    info is needed later, when adding the 2xMSG_PROT at the
 *                    end of the JOB, to know which barriers to put in the
 *                    MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
 *                    have streams so the engine can't be busy by another
 *                    stream.
 */
struct hl_cs_parser {
	struct hl_cb		*user_cb;
	struct hl_cb		*patched_cb;
	struct list_head	*job_userptr_list;
	u64			cs_sequence;
	enum hl_queue_type	queue_type;
	u32			ctx_id;
	u32			hw_queue_id;
	u32			user_cb_size;
	u32			patched_cb_size;
	u8			job_id;
	u8			is_kernel_allocated_cb;
	u8			contains_dma_pkt;
};


/*
 * MEMORY STRUCTURE
 */

/**
 * struct hl_vm_hash_node - hash element from virtual address to virtual
 *				memory area descriptor (hl_vm_phys_pg_list or
 *				hl_userptr).
 * @node: node to hang on the hash table in context object.
 * @vaddr: key virtual address.
 * @ptr: value pointer (hl_vm_phys_pg_list or hl_userptr).
 */
struct hl_vm_hash_node {
	struct hlist_node	node;
	u64			vaddr;
	void			*ptr;
};

/**
 * struct hl_vm_phys_pg_pack - physical page pack.
 * @vm_type: describes the type of the virtual area descriptor.
 * @pages: the physical page array.
 * @npages: num physical pages in the pack.
 * @total_size: total size of all the pages in this list.
 * @mapping_cnt: number of shared mappings.
 * @asid: the context related to this list.
 * @page_size: size of each page in the pack.
 * @flags: HL_MEM_* flags related to this list.
 * @handle: the provided handle related to this list.
 * @offset: offset from the first page.
 * @contiguous: is contiguous physical memory.
 * @created_from_userptr: is product of host virtual address.
 */
struct hl_vm_phys_pg_pack {
	enum vm_type_t		vm_type; /* must be first */
	u64			*pages;
	u64			npages;
	u64			total_size;
	atomic_t		mapping_cnt;
	u32			asid;
	u32			page_size;
	u32			flags;
	u32			handle;
	u32			offset;
	u8			contiguous;
	u8			created_from_userptr;
};

/**
 * struct hl_vm_va_block - virtual range block information.
 * @node: node to hang on the virtual range list in context object.
 * @start: virtual range start address.
 * @end: virtual range end address.
 * @size: virtual range size.
 */
struct hl_vm_va_block {
	struct list_head	node;
	u64			start;
	u64			end;
	u64			size;
};

/**
 * struct hl_vm - virtual memory manager for MMU.
 * @dram_pg_pool: pool for DRAM physical pages of 2MB.
 * @dram_pg_pool_refcount: reference counter for the pool usage.
 * @idr_lock: protects the phys_pg_list_handles.
 * @phys_pg_pack_handles: idr to hold all device allocations handles.
 * @init_done: whether initialization was done. We need this because VM
 *		initialization might be skipped during device initialization.
 */
struct hl_vm {
	struct gen_pool		*dram_pg_pool;
	struct kref		dram_pg_pool_refcount;
	spinlock_t		idr_lock;
	struct idr		phys_pg_pack_handles;
	u8			init_done;
};


/*
 * DEBUG, PROFILING STRUCTURE
 */

/**
 * struct hl_debug_params - Coresight debug parameters.
 * @input: pointer to component specific input parameters.
 * @output: pointer to component specific output parameters.
 * @output_size: size of output buffer.
 * @reg_idx: relevant register ID.
 * @op: component operation to execute.
 * @enable: true if to enable component debugging, false otherwise.
 */
struct hl_debug_params {
	void *input;
	void *output;
	u32 output_size;
	u32 reg_idx;
	u32 op;
	bool enable;
};

/*
 * FILE PRIVATE STRUCTURE
 */

/**
 * struct hl_fpriv - process information stored in FD private data.
 * @hdev: habanalabs device structure.
 * @filp: pointer to the given file structure.
 * @taskpid: current process ID.
 * @ctx: current executing context. TODO: remove for multiple ctx per process
 * @ctx_mgr: context manager to handle multiple context for this FD.
 * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
 * @debugfs_list: list of relevant ASIC debugfs.
 * @dev_node: node in the device list of file private data
 * @refcount: number of related contexts.
 * @restore_phase_mutex: lock for context switch and restore phase.
 * @is_control: true for control device, false otherwise
 */
struct hl_fpriv {
	struct hl_device	*hdev;
	struct file		*filp;
	struct pid		*taskpid;
	struct hl_ctx		*ctx;
	struct hl_ctx_mgr	ctx_mgr;
	struct hl_cb_mgr	cb_mgr;
	struct list_head	debugfs_list;
	struct list_head	dev_node;
	struct kref		refcount;
	struct mutex		restore_phase_mutex;
	u8			is_control;
};


/*
 * DebugFS
 */

/**
 * struct hl_info_list - debugfs file ops.
 * @name: file name.
 * @show: function to output information.
 * @write: function to write to the file.
 */
struct hl_info_list {
	const char	*name;
	int		(*show)(struct seq_file *s, void *data);
	ssize_t		(*write)(struct file *file, const char __user *buf,
				size_t count, loff_t *f_pos);
};

/**
 * struct hl_debugfs_entry - debugfs dentry wrapper.
 * @dent: base debugfs entry structure.
 * @info_ent: dentry related ops.
 * @dev_entry: ASIC specific debugfs manager.
 */
struct hl_debugfs_entry {
	struct dentry			*dent;
	const struct hl_info_list	*info_ent;
	struct hl_dbg_device_entry	*dev_entry;
};

/**
 * struct hl_dbg_device_entry - ASIC specific debugfs manager.
 * @root: root dentry.
 * @hdev: habanalabs device structure.
 * @entry_arr: array of available hl_debugfs_entry.
 * @file_list: list of available debugfs files.
 * @file_mutex: protects file_list.
 * @cb_list: list of available CBs.
 * @cb_spinlock: protects cb_list.
 * @cs_list: list of available CSs.
 * @cs_spinlock: protects cs_list.
 * @cs_job_list: list of available CB jobs.
 * @cs_job_spinlock: protects cs_job_list.
 * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
 * @userptr_spinlock: protects userptr_list.
 * @ctx_mem_hash_list: list of available contexts with MMU mappings.
 * @ctx_mem_hash_spinlock: protects ctx_mem_hash_list.
 * @addr: next address to read/write from/to in read/write32.
 * @mmu_addr: next virtual address to translate to physical address in mmu_show.
 * @mmu_asid: ASID to use while translating in mmu_show.
 * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
 * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
 * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
 */
struct hl_dbg_device_entry {
	struct dentry			*root;
	struct hl_device		*hdev;
	struct hl_debugfs_entry		*entry_arr;
	struct list_head		file_list;
	struct mutex			file_mutex;
	struct list_head		cb_list;
	spinlock_t			cb_spinlock;
	struct list_head		cs_list;
	spinlock_t			cs_spinlock;
	struct list_head		cs_job_list;
	spinlock_t			cs_job_spinlock;
	struct list_head		userptr_list;
	spinlock_t			userptr_spinlock;
	struct list_head		ctx_mem_hash_list;
	spinlock_t			ctx_mem_hash_spinlock;
	u64				addr;
	u64				mmu_addr;
	u32				mmu_asid;
	u8				i2c_bus;
	u8				i2c_addr;
	u8				i2c_reg;
};


/*
 * DEVICES
 */

/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
 * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
 */
#define HL_MAX_MINORS	256

/*
 * Registers read & write functions.
 */

u32 hl_rreg(struct hl_device *hdev, u32 reg);
void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);

#define RREG32(reg) hdev->asic_funcs->rreg(hdev, (reg))
#define WREG32(reg, v) hdev->asic_funcs->wreg(hdev, (reg), (v))
#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n",	\
			hdev->asic_funcs->rreg(hdev, (reg)))

#define WREG32_P(reg, val, mask)				\
	do {							\
		u32 tmp_ = RREG32(reg);				\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))

#define RMWREG32(reg, val, mask)				\
	do {							\
		u32 tmp_ = RREG32(reg);				\
		tmp_ &= ~(mask);				\
		tmp_ |= ((val) << __ffs(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)

#define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask))

#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
#define WREG32_FIELD(reg, offset, field, val)	\
	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & \
				~REG_FIELD_MASK(reg, field)) | \
				(val) << REG_FIELD_SHIFT(reg, field))
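
/*
 * Usage sketch (register and field names below are hypothetical):
 *
 *	RMWREG32(mmSOME_QM_CFG, 1, SOME_QM_CFG_ENABLE_MASK);
 *
 * updates only the ENABLE field while preserving the other bits, and is
 * equivalent to WREG32_FIELD(SOME_QM_CFG, 0, ENABLE, 1) when the field's
 * mask and shift match.
 */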

/* Timeout should be longer when working with simulator but cap the
 * increased timeout to some maximum
 */
#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
({ \
	ktime_t __timeout; \
	if (hdev->pdev) \
		__timeout = ktime_add_us(ktime_get(), timeout_us); \
	else \
		__timeout = ktime_add_us(ktime_get(),\
				min((u64)(timeout_us * 10), \
					(u64) HL_SIM_MAX_TIMEOUT_US)); \
	might_sleep_if(sleep_us); \
	for (;;) { \
		(val) = RREG32(addr); \
		if (cond) \
			break; \
		if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = RREG32(addr); \
			break; \
		} \
		if (sleep_us) \
			usleep_range((sleep_us >> 2) + 1, sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
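
/*
 * Usage sketch (hypothetical register name): poll every 100us, for up to
 * one second, until bit 0 of a status register is set:
 *
 *	rc = hl_poll_timeout(hdev, mmSOME_STATUS, status, (status & 0x1),
 *				100, 1000000);
 *	if (rc)
 *		dev_err(hdev->dev, "status bit was not set in time\n");
 */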

/*
 * The address in this macro always points to a memory location in the
 * host's (server's) memory. That location is updated asynchronously
 * either by the direct access of the device or by another core.
 *
 * To work both in LE and BE architectures, we need to distinguish between the
 * two states (device or another core updates the memory location). Therefore,
 * if mem_written_by_device is true, the host memory being polled will be
 * updated directly by the device. If false, the host memory being polled will
 * be updated by host CPU. Required so host knows whether or not the memory
 * might need to be byte-swapped before returning value to caller.
 */
#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \
				mem_written_by_device) \
({ \
	ktime_t __timeout; \
	if (hdev->pdev) \
		__timeout = ktime_add_us(ktime_get(), timeout_us); \
	else \
		__timeout = ktime_add_us(ktime_get(),\
				min((u64)(timeout_us * 10), \
					(u64) HL_SIM_MAX_TIMEOUT_US)); \
	might_sleep_if(sleep_us); \
	for (;;) { \
		/* Verify we read updates done by other cores or by device */ \
		mb(); \
		(val) = *((u32 *) (uintptr_t) (addr)); \
		if (mem_written_by_device) \
			(val) = le32_to_cpu(*(__le32 *) &(val)); \
		if (cond) \
			break; \
		if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = *((u32 *) (uintptr_t) (addr)); \
			if (mem_written_by_device) \
				(val) = le32_to_cpu(*(__le32 *) &(val)); \
			break; \
		} \
		if (sleep_us) \
			usleep_range((sleep_us >> 2) + 1, sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
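
/*
 * Usage sketch: wait for the device to write an expected value to a fence
 * location in host memory (hence mem_written_by_device is true). The
 * variables below are illustrative only:
 *
 *	rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
 *					1000, timeout_us, true);
 */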

#define hl_poll_timeout_device_memory(hdev, addr, val, cond, sleep_us, \
					timeout_us) \
({ \
	ktime_t __timeout; \
	if (hdev->pdev) \
		__timeout = ktime_add_us(ktime_get(), timeout_us); \
	else \
		__timeout = ktime_add_us(ktime_get(),\
				min((u64)(timeout_us * 10), \
					(u64) HL_SIM_MAX_TIMEOUT_US)); \
	might_sleep_if(sleep_us); \
	for (;;) { \
		(val) = readl(addr); \
		if (cond) \
			break; \
		if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = readl(addr); \
			break; \
		} \
		if (sleep_us) \
			usleep_range((sleep_us >> 2) + 1, sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})

struct hwmon_chip_info;

/**
 * struct hl_device_reset_work - reset workqueue task wrapper.
 * @reset_work: reset work to be done.
 * @hdev: habanalabs device structure.
 */
struct hl_device_reset_work {
	struct work_struct		reset_work;
	struct hl_device		*hdev;
};

/**
 * struct hl_device_idle_busy_ts - used for calculating device utilization rate.
 * @idle_to_busy_ts: timestamp where device changed from idle to busy.
 * @busy_to_idle_ts: timestamp where device changed from busy to idle.
 */
struct hl_device_idle_busy_ts {
	ktime_t				idle_to_busy_ts;
	ktime_t				busy_to_idle_ts;
};
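
/*
 * Illustrative sketch (assumption, not an existing driver helper): the busy
 * time contributed by a single entry can be derived from its two timestamps,
 * treating a still-open busy period as ending "now".
 *
 *	static inline s64 hl_ts_entry_busy_us(struct hl_device_idle_busy_ts *ts)
 *	{
 *		ktime_t end = ktime_compare(ts->busy_to_idle_ts,
 *					ts->idle_to_busy_ts) > 0 ?
 *					ts->busy_to_idle_ts : ktime_get();
 *
 *		return ktime_us_delta(end, ts->idle_to_busy_ts);
 *	}
 */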


/**
 * struct hl_mmu_priv - used for holding per-device mmu internal information.
 * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
 * @mmu_shadow_hop0: shadow array of hop0 tables.
 */
struct hl_mmu_priv {
	struct gen_pool *mmu_pgt_pool;
	void *mmu_shadow_hop0;
};

/**
 * struct hl_mmu_funcs - Device related MMU functions.
 * @init: initialize the MMU module.
 * @fini: release the MMU module.
 * @ctx_init: Initialize a context for using the MMU module.
 * @ctx_fini: disable a ctx from using the mmu module.
 * @map: maps a virtual address to physical address for a context.
 * @unmap: unmap a virtual address of a context.
 * @flush: flush all writes from all cores to reach device MMU.
 * @swap_out: marks all mapping of the given context as swapped out.
 * @swap_in: marks all mapping of the given context as swapped in.
 */
struct hl_mmu_funcs {
	int (*init)(struct hl_device *hdev);
	void (*fini)(struct hl_device *hdev);
	int (*ctx_init)(struct hl_ctx *ctx);
	void (*ctx_fini)(struct hl_ctx *ctx);
	int (*map)(struct hl_ctx *ctx,
			u64 virt_addr, u64 phys_addr, u32 page_size,
			bool is_dram_addr);
	int (*unmap)(struct hl_ctx *ctx,
			u64 virt_addr, bool is_dram_addr);
	void (*flush)(struct hl_ctx *ctx);
	void (*swap_out)(struct hl_ctx *ctx);
	void (*swap_in)(struct hl_ctx *ctx);
};
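
/*
 * Illustrative sketch (assumption, not the driver's actual code): an MMU
 * implementation fills this table from its *_set_funcs() helper, e.g. the
 * v1 page-table walker. The callback names below are assumed.
 *
 *	void hl_mmu_v1_set_funcs(struct hl_device *hdev)
 *	{
 *		struct hl_mmu_funcs *mmu = &hdev->mmu_func;
 *
 *		mmu->init = mmu_v1_init;
 *		mmu->fini = mmu_v1_fini;
 *		mmu->ctx_init = mmu_v1_ctx_init;
 *		mmu->ctx_fini = mmu_v1_ctx_fini;
 *		mmu->map = mmu_v1_map;
 *		mmu->unmap = mmu_v1_unmap;
 *		mmu->flush = mmu_v1_flush;
 *		mmu->swap_out = mmu_v1_swap_out;
 *		mmu->swap_in = mmu_v1_swap_in;
 *	}
 */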

/**
 * struct hl_device - habanalabs device structure.
 * @pdev: pointer to PCI device, can be NULL in case of simulator device.
 * @pcie_bar_phys: array of available PCIe bars physical addresses.
 *		   (required only for PCI address match mode)
 * @pcie_bar: array of available PCIe bars virtual addresses.
 * @rmmio: configuration area address on SRAM.
 * @cdev: related char device.
 * @cdev_ctrl: char device for control operations only (INFO IOCTL)
 * @dev: related kernel basic device structure.
 * @dev_ctrl: related kernel device structure for the control device
 * @work_freq: delayed work to lower device frequency if possible.
 * @work_heartbeat: delayed work for CPU-CP is-alive check.
 * @asic_name: ASIC specific name.
 * @asic_type: ASIC specific type.
 * @completion_queue: array of hl_cq.
 * @cq_wq: work queues of completion queues for executing work in process
 *         context.
 * @eq_wq: work queue of event queue for executing work in process context.
 * @kernel_ctx: Kernel driver context structure.
 * @kernel_queues: array of hl_hw_queue.
 * @hw_queues_mirror_list: CS mirror list for TDR.
 * @hw_queues_mirror_lock: protects hw_queues_mirror_list.
 * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
 * @event_queue: event queue for IRQ from CPU-CP.
 * @dma_pool: DMA pool for small allocations.
 * @cpu_accessible_dma_mem: Host <-> CPU-CP shared memory CPU address.
 * @cpu_accessible_dma_address: Host <-> CPU-CP shared memory DMA address.
 * @cpu_accessible_dma_pool: Host <-> CPU-CP shared memory pool.
 * @asid_bitmap: holds used/available ASIDs.
 * @asid_mutex: protects asid_bitmap.
 * @send_cpu_message_lock: enforces only one message in Host <-> CPU-CP queue.
 * @debug_lock: protects critical section of setting debug mode for device
 * @asic_prop: ASIC specific immutable properties.
 * @asic_funcs: ASIC specific functions.
 * @asic_specific: ASIC specific information to use only from ASIC files.
 * @vm: virtual memory manager for MMU.
 * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context.
 * @hwmon_dev: H/W monitor device.
 * @pm_mng_profile: current power management profile.
 * @hl_chip_info: ASIC's sensors information.
 * @hl_debugfs: device's debugfs manager.
 * @cb_pool: list of preallocated CBs.
 * @cb_pool_lock: protects the CB pool.
 * @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
 * @internal_cb_pool_dma_addr: internal command buffer pool dma address.
 * @internal_cb_pool: internal command buffer memory pool.
 * @internal_cb_va_base: internal cb pool mmu virtual address base
 * @fpriv_list: list of file private data structures. Each structure is created
 *              when a user opens the device
 * @fpriv_list_lock: protects the fpriv_list
 * @compute_ctx: current compute context executing.
 * @idle_busy_ts_arr: array to hold time stamps of transitions from idle to busy
 *                    and vice-versa
 * @aggregated_cs_counters: aggregated cs counters among all contexts
 * @mmu_priv: device-specific MMU data.
 * @mmu_func: device-related MMU functions.
 * @dram_used_mem: current DRAM memory consumption.
 * @timeout_jiffies: device CS timeout value.
 * @max_power: the max power of the device, as configured by the sysadmin. This
 *             value is saved so in case of hard-reset, the driver will restore
 *             this value and update the F/W after the re-initialization
 * @clock_gating_mask: bitmask that represents whether clock gating is enabled
 *                     for each engine. See debugfs-driver-habanalabs for
 *                     details.
 * @in_reset: is device in reset flow.
 * @curr_pll_profile: current PLL profile.
 * @card_type: Various ASICs have several card types. This indicates the card
 *             type of the current device.
 * @cs_active_cnt: number of active command submissions on this device (active
 *                 means already in H/W queues)
 * @major: habanalabs kernel driver major.
 * @high_pll: high PLL profile frequency.
 * @soft_reset_cnt: number of soft resets since the driver was loaded.
 * @hard_reset_cnt: number of hard resets since the driver was loaded.
 * @idle_busy_ts_idx: index of current entry in idle_busy_ts_arr
 * @clk_throttling_reason: bitmask that represents the current clk throttling
 *                         reasons
 * @id: device minor.
 * @id_control: minor of the control device
 * @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
 *                    addresses.
 * @disabled: is device disabled.
 * @late_init_done: indicates whether the late init stage was done during
 *                  initialization.
 * @hwmon_initialized: indicates whether the H/W monitor sensors were
 *                     initialized.
 * @hard_reset_pending: is there a hard reset work pending.
 * @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
 * @reset_on_lockup: true if a reset should be done in case of stuck CS, false
 *                   otherwise.
 * @dram_supports_virtual_memory: is MMU enabled towards DRAM.
 * @dram_default_page_mapping: is DRAM default page mapping enabled.
 * @pmmu_huge_range: is a different virtual addresses range used for PMMU with
 *                   huge pages.
 * @init_done: is the initialization of the device done.
 * @mmu_enable: is MMU enabled.
 * @mmu_huge_page_opt: is MMU huge pages optimization enabled.
 * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
 * @dma_mask: the dma mask that was set for this device
 * @in_debug: is device under debug. This, together with fpriv_list, enforces
 *            that only a single user is configuring the debug infrastructure.
 * @power9_64bit_dma_enable: true to enable 64-bit DMA mask support. Relevant
 *                           only to POWER9 machines.
 * @cdev_sysfs_created: were char devices and sysfs nodes created.
 * @stop_on_err: true if engines should stop on error.
 * @supports_sync_stream: is sync stream supported.
 * @sync_stream_queue_idx: helper index for sync stream queues initialization.
 * @supports_coresight: is CoreSight supported.
 * @supports_soft_reset: is soft reset supported.
 */
struct hl_device {
	struct pci_dev			*pdev;
	u64				pcie_bar_phys[HL_PCI_NUM_BARS];
	void __iomem			*pcie_bar[HL_PCI_NUM_BARS];
	void __iomem			*rmmio;
	struct cdev			cdev;
	struct cdev			cdev_ctrl;
	struct device			*dev;
	struct device			*dev_ctrl;
	struct delayed_work		work_freq;
	struct delayed_work		work_heartbeat;
	char				asic_name[32];
	enum hl_asic_type		asic_type;
	struct hl_cq			*completion_queue;
	struct workqueue_struct		**cq_wq;
	struct workqueue_struct		*eq_wq;
	struct hl_ctx			*kernel_ctx;
	struct hl_hw_queue		*kernel_queues;
	struct list_head		hw_queues_mirror_list;
	spinlock_t			hw_queues_mirror_lock;
	struct hl_cb_mgr		kernel_cb_mgr;
	struct hl_eq			event_queue;
	struct dma_pool			*dma_pool;
	void				*cpu_accessible_dma_mem;
	dma_addr_t			cpu_accessible_dma_address;
	struct gen_pool			*cpu_accessible_dma_pool;
	unsigned long			*asid_bitmap;
	struct mutex			asid_mutex;
	struct mutex			send_cpu_message_lock;
	struct mutex			debug_lock;
	struct asic_fixed_properties	asic_prop;
	const struct hl_asic_funcs	*asic_funcs;
	void				*asic_specific;
	struct hl_vm			vm;
	struct mutex			mmu_cache_lock;
	struct device			*hwmon_dev;
	enum hl_pm_mng_profile		pm_mng_profile;
	struct hwmon_chip_info		*hl_chip_info;

	struct hl_dbg_device_entry	hl_debugfs;

	struct list_head		cb_pool;
	spinlock_t			cb_pool_lock;

	void				*internal_cb_pool_virt_addr;
	dma_addr_t			internal_cb_pool_dma_addr;
	struct gen_pool			*internal_cb_pool;
	u64				internal_cb_va_base;

	struct list_head		fpriv_list;
	struct mutex			fpriv_list_lock;

	struct hl_ctx			*compute_ctx;

	struct hl_device_idle_busy_ts	*idle_busy_ts_arr;

	struct hl_cs_counters		aggregated_cs_counters;

	struct hl_mmu_priv		mmu_priv;
	struct hl_mmu_funcs		mmu_func;

	atomic64_t			dram_used_mem;
	u64				timeout_jiffies;
	u64				max_power;
	u64				clock_gating_mask;
	atomic_t			in_reset;
	enum hl_pll_frequency		curr_pll_profile;
	enum cpucp_card_types		card_type;
	int				cs_active_cnt;
	u32				major;
	u32				high_pll;
	u32				soft_reset_cnt;
	u32				hard_reset_cnt;
	u32				idle_busy_ts_idx;
	u32				clk_throttling_reason;
	u16				id;
	u16				id_control;
	u16				cpu_pci_msb_addr;
	u8				disabled;
	u8				late_init_done;
	u8				hwmon_initialized;
	u8				hard_reset_pending;
	u8				heartbeat;
	u8				reset_on_lockup;
	u8				dram_supports_virtual_memory;
	u8				dram_default_page_mapping;
	u8				pmmu_huge_range;
	u8				init_done;
	u8				device_cpu_disabled;
	u8				dma_mask;
	u8				in_debug;
	u8				power9_64bit_dma_enable;
	u8				cdev_sysfs_created;
	u8				stop_on_err;
	u8				supports_sync_stream;
	u8				sync_stream_queue_idx;
	u8				supports_coresight;
	u8				supports_soft_reset;

	/* Parameters for bring-up */
	u8				mmu_enable;
	u8				mmu_huge_page_opt;
	u8				cpu_enable;
	u8				reset_pcilink;
	u8				cpu_queues_enable;
	u8				fw_loading;
	u8				pldm;
	u8				axi_drain;
	u8				sram_scrambler_enable;
	u8				dram_scrambler_enable;
	u8				hard_reset_on_fw_events;
	u8				bmc_enable;
	u8				rl_enable;
};


/*
 * IOCTLs
 */

/**
 * typedef hl_ioctl_t - typedef for ioctl function in the driver
 * @hpriv: pointer to the FD's private data, which contains state of
 *		user process
 * @data: pointer to the input/output arguments structure of the IOCTL
 *
 * Return: 0 for success, negative value for error
 */
typedef int hl_ioctl_t(struct hl_fpriv *hpriv, void *data);

/**
 * struct hl_ioctl_desc - describes an IOCTL entry of the driver.
 * @cmd: the IOCTL code as created by the kernel macros.
 * @func: pointer to the driver's function that should be called for this IOCTL.
 */
struct hl_ioctl_desc {
	unsigned int cmd;
	hl_ioctl_t *func;
};
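
/*
 * Example (illustrative only): a dispatch table built from hl_ioctl_desc
 * entries. The real table lives in the driver's file-operations code; the
 * IOCTL codes come from uapi/misc/habanalabs.h.
 *
 *	static const struct hl_ioctl_desc hl_ioctls[] = {
 *		{HL_IOCTL_CB, hl_cb_ioctl},
 *		{HL_IOCTL_CS, hl_cs_ioctl},
 *		{HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl},
 *		{HL_IOCTL_MEMORY, hl_mem_ioctl},
 *	};
 */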


/*
 * Kernel module functions that can be accessed by entire module
 */

/**
 * hl_mem_area_inside_range() - Checks whether address+size are inside a range.
 * @address: The start address of the area we want to validate.
 * @size: The size in bytes of the area we want to validate.
 * @range_start_address: The start address of the valid range.
 * @range_end_address: The end address of the valid range.
 *
 * Return: true if the area is inside the valid range, false otherwise.
 */
static inline bool hl_mem_area_inside_range(u64 address, u64 size,
				u64 range_start_address, u64 range_end_address)
{
	u64 end_address = address + size;

	if ((address >= range_start_address) &&
			(end_address <= range_end_address) &&
			(end_address > address))
		return true;

	return false;
}

/**
 * hl_mem_area_crosses_range() - Checks whether address+size crossing a range.
 * @address: The start address of the area we want to validate.
 * @size: The size in bytes of the area we want to validate.
 * @range_start_address: The start address of the valid range.
 * @range_end_address: The end address of the valid range.
 *
 * Return: true if the area overlaps part or all of the valid range,
 *		false otherwise.
 */
static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
				u64 range_start_address, u64 range_end_address)
{
	u64 end_address = address + size;

	if ((address >= range_start_address) &&
			(address < range_end_address))
		return true;

	if ((end_address >= range_start_address) &&
			(end_address < range_end_address))
		return true;

	if ((address < range_start_address) &&
			(end_address >= range_end_address))
		return true;

	return false;
}
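
/*
 * Example (illustrative only): validating a user buffer against a device
 * virtual address range. The variable names below are hypothetical.
 *
 *	if (!hl_mem_area_inside_range(user_addr, user_size,
 *					va_range_start, va_range_end))
 *		return -EINVAL;
 */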

int hl_device_open(struct inode *inode, struct file *filp);
int hl_device_open_ctrl(struct inode *inode, struct file *filp);
bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
enum hl_device_status hl_device_status(struct hl_device *hdev);
int hl_device_set_debug_mode(struct hl_device *hdev, bool enable);
int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
		enum hl_asic_type asic_type, int minor);
void destroy_hdev(struct hl_device *hdev);
int hl_hw_queues_create(struct hl_device *hdev);
void hl_hw_queues_destroy(struct hl_device *hdev);
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
				u32 cb_size, u64 cb_ptr);
int hl_hw_queue_schedule_cs(struct hl_cs *cs);
u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
void hl_int_hw_queue_update_ci(struct hl_cs *cs);
void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset);

#define hl_queue_inc_ptr(p)		hl_hw_queue_add_ptr(p, 1)
#define hl_pi_2_offset(pi)		((pi) & (HL_QUEUE_LENGTH - 1))

int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id);
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q);
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
irqreturn_t hl_irq_handler_cq(int irq, void *arg);
irqreturn_t hl_irq_handler_eq(int irq, void *arg);
u32 hl_cq_inc_ptr(u32 ptr);

int hl_asid_init(struct hl_device *hdev);
void hl_asid_fini(struct hl_device *hdev);
unsigned long hl_asid_alloc(struct hl_device *hdev);
void hl_asid_free(struct hl_device *hdev, unsigned long asid);

int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
void hl_ctx_do_release(struct kref *ref);
void hl_ctx_get(struct hl_device *hdev,	struct hl_ctx *ctx);
int hl_ctx_put(struct hl_ctx *ctx);
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);

int hl_device_init(struct hl_device *hdev, struct class *hclass);
void hl_device_fini(struct hl_device *hdev);
int hl_device_suspend(struct hl_device *hdev);
int hl_device_resume(struct hl_device *hdev);
int hl_device_reset(struct hl_device *hdev, bool hard_reset,
			bool from_hard_reset_thread);
void hl_hpriv_get(struct hl_fpriv *hpriv);
void hl_hpriv_put(struct hl_fpriv *hpriv);
int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms);

int hl_build_hwmon_channel_info(struct hl_device *hdev,
		struct cpucp_sensor *sensors_arr);

int hl_sysfs_init(struct hl_device *hdev);
void hl_sysfs_fini(struct hl_device *hdev);

int hl_hwmon_init(struct hl_device *hdev);
void hl_hwmon_fini(struct hl_device *hdev);

int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, u32 cb_size,
		u64 *handle, int ctx_id, bool internal_cb);
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
struct hl_cb *hl_cb_get(struct hl_device *hdev,	struct hl_cb_mgr *mgr,
			u32 handle);
void hl_cb_put(struct hl_cb *cb);
void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb);
int hl_cb_pool_init(struct hl_device *hdev);
int hl_cb_pool_fini(struct hl_device *hdev);

void hl_cs_rollback_all(struct hl_device *hdev);
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
		enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
void hl_sob_reset_error(struct kref *ref);
void hl_fence_put(struct hl_fence *fence);
void hl_fence_get(struct hl_fence *fence);

void goya_set_asic_funcs(struct hl_device *hdev);
void gaudi_set_asic_funcs(struct hl_device *hdev);

int hl_vm_ctx_init(struct hl_ctx *ctx);
void hl_vm_ctx_fini(struct hl_ctx *ctx);

int hl_vm_init(struct hl_device *hdev);
void hl_vm_fini(struct hl_device *hdev);

int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
			struct hl_userptr *userptr);
void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
void hl_userptr_delete_list(struct hl_device *hdev,
				struct list_head *userptr_list);
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
				struct list_head *userptr_list,
				struct hl_userptr **userptr);

int hl_mmu_init(struct hl_device *hdev);
void hl_mmu_fini(struct hl_device *hdev);
int hl_mmu_ctx_init(struct hl_ctx *ctx);
void hl_mmu_ctx_fini(struct hl_ctx *ctx);
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
		u32 page_size, bool flush_pte);
int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
		bool flush_pte);
void hl_mmu_swap_out(struct hl_ctx *ctx);
void hl_mmu_swap_in(struct hl_ctx *ctx);
int hl_mmu_if_set_funcs(struct hl_device *hdev);
void hl_mmu_v1_set_funcs(struct hl_device *hdev);

int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
				void __iomem *dst);
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
				u16 len, u32 timeout, long *result);
int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type);
int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
		size_t irq_arr_size);
int hl_fw_test_cpu_queue(struct hl_device *hdev);
void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
						dma_addr_t *dma_handle);
void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
					void *vaddr);
int hl_fw_send_heartbeat(struct hl_device *hdev);
int hl_fw_cpucp_info_get(struct hl_device *hdev);
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
		struct hl_info_pci_counters *counters);
int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
			u64 *total_energy);
int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
			u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
			u32 boot_err0_reg, bool skip_bmc,
			u32 cpu_timeout, u32 boot_fit_timeout);

int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
			bool is_wc[3]);
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
		struct hl_inbound_pci_region *pci_region);
int hl_pci_set_outbound_region(struct hl_device *hdev,
		struct hl_outbound_pci_region *pci_region);
int hl_pci_init(struct hl_device *hdev);
void hl_pci_fini(struct hl_device *hdev);

long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
int hl_get_temperature(struct hl_device *hdev,
		       int sensor_index, u32 attr, long *value);
int hl_set_temperature(struct hl_device *hdev,
		       int sensor_index, u32 attr, long value);
int hl_get_voltage(struct hl_device *hdev,
		   int sensor_index, u32 attr, long *value);
int hl_get_current(struct hl_device *hdev,
		   int sensor_index, u32 attr, long *value);
int hl_get_fan_speed(struct hl_device *hdev,
		     int sensor_index, u32 attr, long *value);
int hl_get_pwm_info(struct hl_device *hdev,
		    int sensor_index, u32 attr, long *value);
void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
			long value);
u64 hl_get_max_power(struct hl_device *hdev);
void hl_set_max_power(struct hl_device *hdev);
int hl_set_voltage(struct hl_device *hdev,
			int sensor_index, u32 attr, long value);
int hl_set_current(struct hl_device *hdev,
			int sensor_index, u32 attr, long value);

#ifdef CONFIG_DEBUG_FS

void hl_debugfs_init(void);
void hl_debugfs_fini(void);
void hl_debugfs_add_device(struct hl_device *hdev);
void hl_debugfs_remove_device(struct hl_device *hdev);
void hl_debugfs_add_file(struct hl_fpriv *hpriv);
void hl_debugfs_remove_file(struct hl_fpriv *hpriv);
void hl_debugfs_add_cb(struct hl_cb *cb);
void hl_debugfs_remove_cb(struct hl_cb *cb);
void hl_debugfs_add_cs(struct hl_cs *cs);
void hl_debugfs_remove_cs(struct hl_cs *cs);
void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job);
void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job);
void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr);
void hl_debugfs_remove_userptr(struct hl_device *hdev,
				struct hl_userptr *userptr);
void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);

#else

static inline void __init hl_debugfs_init(void)
{
}

static inline void hl_debugfs_fini(void)
{
}

static inline void hl_debugfs_add_device(struct hl_device *hdev)
{
}

static inline void hl_debugfs_remove_device(struct hl_device *hdev)
{
}

static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
}

static inline void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
{
}

static inline void hl_debugfs_add_cb(struct hl_cb *cb)
{
}

static inline void hl_debugfs_remove_cb(struct hl_cb *cb)
{
}

static inline void hl_debugfs_add_cs(struct hl_cs *cs)
{
}

static inline void hl_debugfs_remove_cs(struct hl_cs *cs)
{
}

static inline void hl_debugfs_add_job(struct hl_device *hdev,
					struct hl_cs_job *job)
{
}

static inline void hl_debugfs_remove_job(struct hl_device *hdev,
					struct hl_cs_job *job)
{
}

static inline void hl_debugfs_add_userptr(struct hl_device *hdev,
					struct hl_userptr *userptr)
{
}

static inline void hl_debugfs_remove_userptr(struct hl_device *hdev,
					struct hl_userptr *userptr)
{
}

static inline void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev,
					struct hl_ctx *ctx)
{
}

static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
					struct hl_ctx *ctx)
{
}

#endif

/* IOCTLs */
long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);

#endif /* HABANALABSP_H_ */