// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

/**
 * struct hl_eqe_work - This structure is used to schedule work of EQ
 *                      entry and cpucp_reset event
 *
 * @eq_work:          workqueue object to run when EQ entry is received
 * @hdev:             pointer to device structure
 * @eq_entry:         copy of the EQ entry
 */
struct hl_eqe_work {
	struct work_struct	eq_work;
	struct hl_device	*hdev;
	struct hl_eq_entry	eq_entry;
};

/**
 * hl_cq_inc_ptr - increment ci or pi of cq
 *
 * @ptr: the current ci or pi value of the completion queue
 *
 * Increment ptr by 1. If it reaches the number of completion queue
 * entries, set it to 0
 */
inline u32 hl_cq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_CQ_LENGTH))
		ptr = 0;
	return ptr;
}

/**
 * hl_eq_inc_ptr - increment ci of eq
 *
 * @ptr: the current ci value of the event queue
 *
 * Increment ptr by 1. If it reaches the number of event queue
 * entries, set it to 0
 */
static inline u32 hl_eq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_EQ_LENGTH))
		ptr = 0;
	return ptr;
}

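/**
 * irq_handle_eqe - handle a single event queue entry
 *
 * @work: pointer to the work object embedded in struct hl_eqe_work
 *
 * Runs in workqueue context, passes the copied EQ entry to the
 * ASIC-specific handler and frees the work item allocated by the
 * interrupt handler
 */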
static void irq_handle_eqe(struct work_struct *work)
{
	struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
							eq_work);
	struct hl_device *hdev = eqe_work->hdev;

	hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);

	kfree(eqe_work);
}

/**
 * hl_irq_handler_cq - irq handler for completion queue
 *
 * @irq: irq number
 * @arg: pointer to completion queue structure
 *
 */
irqreturn_t hl_irq_handler_cq(int irq, void *arg)
{
	struct hl_cq *cq = arg;
	struct hl_device *hdev = cq->hdev;
	struct hl_hw_queue *queue;
	struct hl_cs_job *job;
	bool shadow_index_valid;
	u16 shadow_index;
	struct hl_cq_entry *cq_entry, *cq_base;

	if (hdev->disabled) {
		dev_dbg(hdev->dev,
			"Device disabled but received IRQ %d for CQ %d\n",
			irq, cq->hw_queue_id);
		return IRQ_HANDLED;
	}

	cq_base = cq->kernel_address;

	while (1) {
		bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
					CQ_ENTRY_READY_MASK)
						>> CQ_ENTRY_READY_SHIFT);

		if (!entry_ready)
			break;

		cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];

		/* Make sure we read CQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);

		shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_SHIFT);

		queue = &hdev->kernel_queues[cq->hw_queue_id];

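		/* If the entry points to a valid job, schedule its completion
		 * work on the completion queue's workqueue
		 */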
		if ((shadow_index_valid) && (!hdev->disabled)) {
			job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
			queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
		}

		atomic_inc(&queue->ci);

		/* Clear CQ entry ready bit */
		cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
						~CQ_ENTRY_READY_MASK);

		cq->ci = hl_cq_inc_ptr(cq->ci);

		/* Increment free slots */
		atomic_inc(&cq->free_slots_cnt);
	}

	return IRQ_HANDLED;
}

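/**
 * handle_user_cq - wake up waiters of a user completion queue interrupt
 *
 * @hdev: pointer to device structure
 * @user_cq: pointer to the user interrupt structure
 *
 * Stamp every pending fence with the current time and complete it, so
 * all waiters registered on this interrupt are released
 */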
static void handle_user_cq(struct hl_device *hdev,
			struct hl_user_interrupt *user_cq)
{
	struct hl_user_pending_interrupt *pend;
	ktime_t now = ktime_get();

	spin_lock(&user_cq->wait_list_lock);
	list_for_each_entry(pend, &user_cq->wait_list_head, wait_list_node) {
		pend->fence.timestamp = now;
		complete_all(&pend->fence.completion);
	}
	spin_unlock(&user_cq->wait_list_lock);
}

/**
 * hl_irq_handler_user_cq - irq handler for user completion queues
 *
 * @irq: irq number
 * @arg: pointer to user interrupt structure
 *
 */
irqreturn_t hl_irq_handler_user_cq(int irq, void *arg)
{
	struct hl_user_interrupt *user_cq = arg;
	struct hl_device *hdev = user_cq->hdev;

	dev_dbg(hdev->dev,
		"got user completion interrupt id %u",
		user_cq->interrupt_id);

	/* Handle user cq interrupts registered on all interrupts */
	handle_user_cq(hdev, &hdev->common_user_interrupt);

	/* Handle user cq interrupts registered on this specific interrupt */
	handle_user_cq(hdev, user_cq);

	return IRQ_HANDLED;
}

/**
 * hl_irq_handler_default - default irq handler
 *
 * @irq: irq number
 * @arg: pointer to user interrupt structure
 *
 */
irqreturn_t hl_irq_handler_default(int irq, void *arg)
{
	struct hl_user_interrupt *user_interrupt = arg;
	struct hl_device *hdev = user_interrupt->hdev;
	u32 interrupt_id = user_interrupt->interrupt_id;

	dev_err(hdev->dev,
		"got invalid user interrupt %u",
		interrupt_id);

	return IRQ_HANDLED;
}

/**
 * hl_irq_handler_eq - irq handler for event queue
 *
 * @irq: irq number
 * @arg: pointer to event queue structure
 *
 */
irqreturn_t hl_irq_handler_eq(int irq, void *arg)
{
	struct hl_eq *eq = arg;
	struct hl_device *hdev = eq->hdev;
	struct hl_eq_entry *eq_entry;
	struct hl_eq_entry *eq_base;
	struct hl_eqe_work *handle_eqe_work;
	bool entry_ready;
	u32 cur_eqe;
	u16 cur_eqe_index;

	eq_base = eq->kernel_address;

	while (1) {
		cur_eqe = le32_to_cpu(eq_base[eq->ci].hdr.ctl);
		entry_ready = !!FIELD_GET(EQ_CTL_READY_MASK, cur_eqe);

		if (!entry_ready)
			break;

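		/* When index checking is enabled, only handle an entry whose
		 * index is consecutive with the previously handled one
		 */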
		cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe);
		if ((hdev->event_queue.check_eqe_index) &&
				(((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK)
							!= cur_eqe_index)) {
			dev_dbg(hdev->dev,
				"EQE 0x%x in queue is ready but index does not match %d!=%d",
				eq_base[eq->ci].hdr.ctl,
				((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
				cur_eqe_index);
			break;
		}

		eq->prev_eqe_index++;

		eq_entry = &eq_base[eq->ci];

		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		if (hdev->disabled) {
			dev_warn(hdev->dev,
				"Device disabled but received IRQ %d for EQ\n",
					irq);
			goto skip_irq;
		}

		handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
		if (handle_eqe_work) {
			INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
			handle_eqe_work->hdev = hdev;

			memcpy(&handle_eqe_work->eq_entry, eq_entry,
					sizeof(*eq_entry));

			queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
		}
skip_irq:
		/* Clear EQ entry ready bit */
		eq_entry->hdr.ctl =
			cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
							~EQ_CTL_READY_MASK);

		eq->ci = hl_eq_inc_ptr(eq->ci);

		hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
	}

	return IRQ_HANDLED;
}

/**
 * hl_cq_init - main initialization function for a cq object
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 * @hw_queue_id: The H/W queue ID this completion queue belongs to
 *
 * Allocate dma-able memory for the completion queue and initialize fields
 * Returns 0 on success
 */
int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
	void *p;

	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
				&q->bus_address, GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->hw_queue_id = hw_queue_id;
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	return 0;
}

/**
 * hl_cq_fini - destroy completion queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 *
 * Free the completion queue memory
 */
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
	hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
						 q->kernel_address,
						 q->bus_address);
}

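/**
 * hl_cq_reset - reset a completion queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 *
 * Reset the pi/ci and the free slots counter, and clear the queue memory
 * so stale entries are not processed when the device is operational again
 */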
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
{
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queues so we won't process old entries
	 * when the device is operational again
	 */

	memset(q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}

/**
 * hl_eq_init - main initialization function for an event queue object
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Allocate dma-able memory for the event queue and initialize fields
 * Returns 0 on success
 */
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
	void *p;

	p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
							HL_EQ_SIZE_IN_BYTES,
							&q->bus_address);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->ci = 0;
	q->prev_eqe_index = 0;

	return 0;
}

/**
 * hl_eq_fini - destroy event queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Free the event queue memory
 */
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
	flush_workqueue(hdev->eq_wq);

	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_EQ_SIZE_IN_BYTES,
					q->kernel_address);
}

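/**
 * hl_eq_reset - reset an event queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Reset the ci and the expected entry index, and clear the queue memory
 * so stale entries are not processed when the device is operational again
 */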
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
{
	q->ci = 0;
	q->prev_eqe_index = 0;

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queues so we won't process old entries
	 * when the device is operational again
	 */

	memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
}