/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/pci.h>

#include <linux/kthread.h>
#include <linux/interrupt.h>

#include "mei_dev.h"
#include "hbm.h"

#include "hw-me.h"
#include "hw-me-regs.h"

/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * returns register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
			       unsigned long offset)
{
	return ioread32(hw->mem_addr + offset);
}


/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset to which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
				 unsigned long offset, u32 value)
{
	iowrite32(value, hw->mem_addr + offset);
}

/**
 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
 *  read window register
 *
 * @dev: the device structure
 *
 * returns ME_CB_RW register value (u32)
 */
static u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}
/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @hw: the me hardware structure
 *
 * returns ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw)
{
	return mei_me_reg_read(hw, ME_CSR_HA);
}

/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @hw: the me hardware structure
 *
 * returns H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
{
	return mei_me_reg_read(hw, H_CSR);
}

/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 * and ignores the H_IS bit for it is write-one-to-zero.
 *
 * @hw: the me hardware structure
 * @hcsr: new register value
 */
static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
{
	hcsr &= ~H_IS;
	mei_me_reg_write(hw, H_CSR, hcsr);
}

/**
 * mei_me_fw_status - read fw status register from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values
 */
static int mei_me_fw_status(struct mei_device *dev,
			    struct mei_fw_status *fw_status)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct mei_me_hw *hw = to_me_hw(dev);
	const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
	int ret;
	int i;

	if (!fw_status)
		return -EINVAL;

	fw_status->count = fw_src->count;
	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
		ret = pci_read_config_dword(pdev,
			fw_src->status[i], &fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * mei_me_hw_config - configure hw dependent settings
 *
 * @dev: mei device
 */
static void mei_me_hw_config(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(to_me_hw(dev));
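	/* H_CBD, bits 31:24 of H_CSR, holds the host circular buffer
	 * depth in 32bit slots (bit positions follow from the shift below) */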
	/* Doesn't change in runtime */
	dev->hbuf_depth = (hcsr & H_CBD) >> 24;

	hw->pg_state = MEI_PG_OFF;
}

/**
 * mei_me_pg_state  - translate internal pg state
 *   to the mei power gating state
 *
 * @dev: mei device
 *
 * returns: the device power gating state, MEI_PG_OFF or MEI_PG_ON
 */
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	return hw->pg_state;
}

/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

	if ((hcsr & H_IS) == H_IS)
		mei_me_reg_write(hw, H_CSR, hcsr);
}
/**
 * mei_me_intr_enable - enables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_enable(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

	hcsr |= H_IE;
	mei_hcsr_set(hw, hcsr);
}

/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

	hcsr &= ~H_IE;
	mei_hcsr_set(hw, hcsr);
}

/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

	hcsr |= H_IG;
	hcsr &= ~H_RST;
	mei_hcsr_set(hw, hcsr);

	/* complete this write before we set host ready on another CPU */
	mmiowb();
}
/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

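	/*
	 * H_IS is write-one-to-clear (see mei_hcsr_set() above), so setting
	 * it here also acks any host interrupt that may still be pending
	 * while H_RST requests the reset.
	 */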
	hcsr |= H_RST | H_IG | H_IS;

	if (intr_enable)
		hcsr |= H_IE;
	else
		hcsr &= ~H_IE;

	dev->recvd_hw_ready = false;
	mei_me_reg_write(hw, H_CSR, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(hw);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable)
		mei_me_hw_reset_release(dev);

	return 0;
}

/**
 * mei_me_host_set_ready - enable device
 *
 * @dev: mei device
 */
static void mei_me_host_set_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	hw->host_hw_state = mei_hcsr_read(hw);
	hw->host_hw_state |= H_IE | H_IG | H_RDY;
	mei_hcsr_set(hw, hw->host_hw_state);
}
/**
 * mei_me_host_is_ready - check whether the host has turned ready
 *
 * @dev: mei device
 * returns bool
 */
static bool mei_me_host_is_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	hw->host_hw_state = mei_hcsr_read(hw);
	return (hw->host_hw_state & H_RDY) == H_RDY;
}

/**
 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
 *
 * @dev: mei device
 * returns bool
 */
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	hw->me_hw_state = mei_me_mecsr_read(hw);
	return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
}

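/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or until timeout is reached
 *
 * @dev: mei device
 *
 * returns 0 on success, -ETIME on timeout
 */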
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	dev->recvd_hw_ready = false;
	return 0;
}

static int mei_me_hw_start(struct mei_device *dev)
{
	int ret = mei_me_hw_ready_wait(dev);

	if (ret)
		return ret;
	dev_dbg(dev->dev, "hw is ready\n");

	mei_me_host_set_ready(dev);
	return ret;
}


/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * returns number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	char read_ptr, write_ptr;

	hw->host_hw_state = mei_hcsr_read(hw);

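	/* the fill level is the distance between the write pointer (H_CBWP)
	 * and the read pointer (H_CBRP) of the host circular buffer;
	 * the bit positions follow from the shifts below */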
	read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
	write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);

	return (unsigned char) (write_ptr - read_ptr);
}

/**
 * mei_me_hbuf_is_empty - checks if host buffer is empty.
 *
 * @dev: the device structure
 *
 * returns true if empty, false otherwise.
 */
static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
	return mei_hbuf_filled_slots(dev) == 0;
}

/**
 * mei_me_hbuf_empty_slots - counts write empty slots.
 *
 * @dev: the device structure
 *
 * returns -EOVERFLOW if overflow, otherwise empty slots count
 */
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
	unsigned char filled_slots, empty_slots;

	filled_slots = mei_hbuf_filled_slots(dev);
	empty_slots = dev->hbuf_depth - filled_slots;

	/* check for overflow */
	if (filled_slots > dev->hbuf_depth)
		return -EOVERFLOW;

	return empty_slots;
}

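/**
 * mei_me_hbuf_max_len - returns size of hw buffer.
 *
 * @dev: the device structure
 *
 * returns the maximum message payload: the buffer size minus
 * the size of the mei message header
 */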
static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
{
	return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
}


/**
 * mei_me_write_message - writes a message to mei device.
 *
 * @dev: the device structure
 * @header: mei HECI header of message
 * @buf: message payload to be written
 *
 * returns 0 on success, -EMSGSIZE if the message does not fit into
 * the host buffer, -EIO if the write has failed
 */
static int mei_me_write_message(struct mei_device *dev,
			struct mei_msg_hdr *header,
			unsigned char *buf)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long rem;
	unsigned long length = header->length;
	u32 *reg_buf = (u32 *)buf;
	u32 hcsr;
	u32 dw_cnt;
	int i;
	int empty_slots;

	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));

	empty_slots = mei_hbuf_empty_slots(dev);
	dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);

	dw_cnt = mei_data2slots(length);
	if (empty_slots < 0 || dw_cnt > empty_slots)
		return -EMSGSIZE;

	mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));

	for (i = 0; i < length / 4; i++)
		mei_me_reg_write(hw, H_CB_WW, reg_buf[i]);

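	/* write the remaining 1-3 bytes zero-padded into a full 32bit slot */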
	rem = length & 0x3;
	if (rem > 0) {
		u32 reg = 0;

		memcpy(&reg, &buf[length - rem], rem);
		mei_me_reg_write(hw, H_CB_WW, reg);
	}

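	/* set H_IG (host interrupt generate) so the ME side picks up
	 * the data just placed in the host circular buffer */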
	hcsr = mei_hcsr_read(hw) | H_IG;
	mei_hcsr_set(hw, hcsr);
	if (!mei_me_hw_is_ready(dev))
		return -EIO;

	return 0;
}

/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * returns -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	char read_ptr, write_ptr;
	unsigned char buffer_depth, filled_slots;

	hw->me_hw_state = mei_me_mecsr_read(hw);
	buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
	read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
	write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
	filled_slots = (unsigned char) (write_ptr - read_ptr);

	/* check for overflow */
	if (filled_slots > buffer_depth)
		return -EOVERFLOW;

	dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
	return (int)filled_slots;
}

/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer will be written
 * @buffer_length: message size will be read
 */
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
		    unsigned long buffer_length)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 *reg_buf = (u32 *)buffer;
	u32 hcsr;

	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
		*reg_buf++ = mei_me_mecbrw_read(dev);

	if (buffer_length > 0) {
		u32 reg = mei_me_mecbrw_read(dev);

		memcpy(reg_buf, &reg, buffer_length);
	}

	hcsr = mei_hcsr_read(hw) | H_IG;
	mei_hcsr_set(hw, hcsr);
	return 0;
}

/**
 * mei_me_pg_enter - write pg enter register
 *
 * @dev: the device structure
 */
static void mei_me_pg_enter(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, H_HPG_CSR);

	reg |= H_HPG_CSR_PGI;
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}

/**
 * mei_me_pg_exit - write pg exit register
 *
 * @dev: the device structure
 */
static void mei_me_pg_exit(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, H_HPG_CSR);

	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

	reg |= H_HPG_CSR_PGIHEXR;
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}

/**
 * mei_me_pg_set_sync - perform pg entry procedure
 *
 * @dev: the device structure
 *
 * returns 0 on success, an error code otherwise
 */
int mei_me_pg_set_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		mei_me_pg_enter(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}

/**
 * mei_me_pg_unset_sync - perform pg exit procedure
 *
 * @dev: the device structure
 *
 * returns 0 on success, an error code otherwise
 */
int mei_me_pg_unset_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	mei_me_pg_exit(dev);

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	else
		ret = -ETIME;

	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}

/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * returns: true if pg is supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, ME_CSR_HA);

	if ((reg & ME_PGIC_HRA) == 0)
		goto notsupported;

	if (!dev->hbm_f_pg_supported)
		goto notsupported;

	return true;

notsupported:
	dev_dbg(dev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
		!!(reg & ME_PGIC_HRA),
		dev->version.major_version,
		dev->version.minor_version,
		HBM_MAJOR_VERSION_PGI,
		HBM_MINOR_VERSION_PGI);

	return false;
}

/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * returns irqreturn_t
 */

irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 csr_reg = mei_hcsr_read(hw);

	if ((csr_reg & H_IS) != H_IS)
		return IRQ_NONE;

	/* clear H_IS bit in H_CSR */
	mei_me_reg_write(hw, H_CSR, csr_reg);

	return IRQ_WAKE_THREAD;
}

/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * returns irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_cl_cb complete_list;
	s32 slots;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);

	/* Ack the interrupt here
	 * In case of MSI we don't go through the quick handler */
	if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
		mei_clear_interrupts(dev);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			mei_me_hw_reset_release(dev);
			dev_dbg(dev->dev, "we need to start the dev.\n");

			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
						rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the reply to the
	 * PG exit message, so block calling write function
	 * if the pg state is not idle
	 */
	if (dev->pg_event == MEI_PG_EVENT_IDLE) {
		rets = mei_irq_write_handler(dev, &complete_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &complete_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}

static const struct mei_hw_ops mei_me_hw_ops = {

	.fw_status = mei_me_fw_status,
	.pg_state  = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_max_len = mei_me_hbuf_max_len,

	.write = mei_me_write_message,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};

static bool mei_me_fw_type_nm(struct pci_dev *pdev)
{
	u32 reg;

	pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
	return (reg & 0x600) == 0x200;
}

#define MEI_CFG_FW_NM                           \
	.quirk_probe = mei_me_fw_type_nm

static bool mei_me_fw_type_sps(struct pci_dev *pdev)
{
	u32 reg;
	/* Read ME FW Status check for SPS Firmware */
	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
	/* if bits [19:16] = 15, running SPS Firmware */
	return (reg & 0xf0000) == 0xf0000;
}

#define MEI_CFG_FW_SPS                           \
	.quirk_probe = mei_me_fw_type_sps


#define MEI_CFG_LEGACY_HFS                      \
	.fw_status.count = 0

#define MEI_CFG_ICH_HFS                        \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2


/* ICH Legacy devices */
const struct mei_cfg mei_me_legacy_cfg = {
	MEI_CFG_LEGACY_HFS,
};

/* ICH devices */
const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* PCH devices */
const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};


/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH Lynx Point with quirk for SPS Firmware exclusion */
const struct mei_cfg mei_me_lpt_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_SPS,
};

/**
 * mei_me_dev_init - allocates and initializes the mei device structure
 *
 * @pdev: The pci device structure
 * @cfg: per device generation config
 *
 * returns The mei_device pointer on success, NULL on failure.
 */
struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
				   const struct mei_cfg *cfg)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = kzalloc(sizeof(struct mei_device) +
			 sizeof(struct mei_me_hw), GFP_KERNEL);
	if (!dev)
		return NULL;
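	/* struct mei_me_hw is carved out of the same allocation, right
	 * after struct mei_device; to_me_hw() is expected to return
	 * that trailing area */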
	hw = to_me_hw(dev);

	mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
	hw->cfg = cfg;
	return dev;
}