/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"

/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

static DEFINE_MUTEX(mei_mutex);


/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client structure
 *
 * returns cb on success, NULL if not found
 */
static struct mei_cl_cb *find_read_list_entry(
		struct mei_device *dev,
		struct mei_cl *cl)
{
	struct mei_cl_cb *pos = NULL;
	struct mei_cl_cb *next = NULL;

	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
	list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, pos->cl))
			return pos;
	return NULL;
}

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	if (!mei_pdev)
		goto out;

	dev = pci_get_drvdata(mei_pdev);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(&dev->pdev->dev, "dev_state != MEI_DEV_ENABLED, dev_state = %s\n",
		    mei_dev_state_str(dev->dev_state));
		goto out_unlock;
	}
	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d\n",
			MEI_MAX_OPEN_HANDLE_COUNT);
		goto out_unlock;
	}

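	/* allocate the first free host client id from the bitmap */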
	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "client_id exceeded %d\n",
				MEI_CLIENTS_MAX);
		goto out_unlock;
	}

	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;
	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_release(dev, file);
		goto out;
	}
	if (cl->state == MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTING;
		dev_dbg(&dev->pdev->dev,
			"disconnecting client host client = %d, ME client = %d\n",
			cl->host_client_id, cl->me_client_id);
		rets = mei_disconnect_host_client(dev, cl);
	}
	mei_cl_flush_queues(cl);
	dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
	    cl->host_client_id,
	    cl->me_client_id);

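	/* return the host client id to the bitmap */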
	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	mei_remove_client_from_file_list(dev, cl->host_client_id);

	/* free read cb */
	cb = NULL;
	if (cl->read_cb) {
		cb = find_read_list_entry(dev, cl);
		/* Remove entry from read list */
		if (cb)
			list_del(&cb->list);

		cb = cl->read_cb;
		cl->read_cb = NULL;
	}

	file->private_data = NULL;

	if (cb) {
		mei_io_cb_free(cb);
		cb = NULL;
	}

	kfree(cl);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}


/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];
			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_read(dev, file, ubuf, length, offset);
		goto out;
	}

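	/* a completed read may be consumed over several read() calls:
	 * resume copying while data remains past *offset, free the
	 * buffer once it has been fully delivered, and reset a stale
	 * offset before starting a new read */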
	if (cl->read_cb && cl->read_cb->buf_idx > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->buf_idx > 0 &&
		   cl->read_cb->buf_idx <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->buf_idx) && *offset > 0) {
		/* Offset needs to be cleaned for contiguous reads */
		*offset = 0;
		rets = 0;
		goto out;
	}

	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

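	/* read not complete and nobody waiting yet: return -EAGAIN for
	 * O_NONBLOCK, otherwise sleep until the read completes or the
	 * client state changes */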
	if (MEI_READ_COMPLETE != cl->reading_state &&
			!waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
			(MEI_READ_COMPLETE == cl->reading_state ||
			 MEI_FILE_INITIALIZING == cl->state ||
			 MEI_FILE_DISCONNECTED == cl->state ||
			 MEI_FILE_DISCONNECTING == cl->state))) {
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
	    cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->buf_idx - %lu\n", cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* truncate the copy length to the data remaining in the
	 * response buffer past *offset */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto err;
	}

	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		rets = -ENODEV;
		goto err;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EMSGSIZE;
		goto err;
	}

	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_err(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id, cl->me_client_id);
		goto err;
	}
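	/* for the iAMT host interface client, drop a stale pending read
	 * whose timeout has expired or that has already completed */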
	if (cl == &dev->iamthif_cl) {
		write_cb = mei_amthif_find_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
				mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->list);
				mei_io_cb_free(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = find_read_list_entry(dev, cl);
		if (write_cb) {
			list_del(&write_cb->list);
			mei_io_cb_free(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
			cl->read_pending = 0;
		}
	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
		*offset = 0;


	write_cb = mei_io_cb_init(cl, file);
	if (!write_cb) {
		dev_err(&dev->pdev->dev, "write cb allocation failed\n");
		rets = -ENOMEM;
		goto err;
	}
	rets = mei_io_cb_alloc_req_buf(write_cb, length);
	if (rets)
		goto err;

	dev_dbg(&dev->pdev->dev, "cb request size = %zd\n", length);

	rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
	if (rets)
		goto err;

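	/* a 4-byte watchdog state-independence message sets a flag that
	 * permits the following read from the watchdog ME client */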
	cl->sm_state = 0;
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
				 write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
				 write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
				 write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_write(dev, write_cb);

		if (rets) {
			dev_err(&dev->pdev->dev,
				"amthi write failed with status = %d\n", rets);
			goto err;
		}
		mutex_unlock(&dev->device_lock);
		return length;
	}

	write_cb->fop_type = MEI_FOP_WRITE;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
	    cl->host_client_id, cl->me_client_id);
	rets = mei_flow_ctrl_creds(dev, cl);
	if (rets < 0)
		goto err;

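	/* no flow control credits, or the host buffer is busy: queue the
	 * request and let the interrupt/timer path transmit it later */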
	if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
		write_cb->buf_idx = 0;
		mei_hdr.msg_complete = 0;
		cl->writing_state = MEI_WRITING;
		goto out;
	}

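	/* the host buffer is available: send as much as fits in a single
	 * message now and leave any remainder on the write list */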
	dev->mei_host_buffer_is_empty = false;
	if (length > mei_hbuf_max_data(dev)) {
		mei_hdr.length = mei_hbuf_max_data(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = length;
		mei_hdr.msg_complete = 1;
	}
	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
	    *((u32 *) &mei_hdr));
	if (mei_write_message(dev, &mei_hdr,
		write_cb->request_buffer.data, mei_hdr.length)) {
		rets = -ENODEV;
		goto err;
	}
	cl->writing_state = MEI_WRITING;
	write_cb->buf_idx = mei_hdr.length;

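	/* fully transmitted messages wait for the write-complete interrupt;
	 * partially sent or unsent ones go on the write list */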
out:
	if (mei_hdr.msg_complete) {
		if (mei_flow_ctrl_reduce(dev, cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&write_cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&write_cb->list, &dev->write_list.list);
	}

	mutex_unlock(&dev->device_lock);
	return length;

err:
	mutex_unlock(&dev->device_lock);
	mei_io_cb_free(write_cb);
	return rets;
}


/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
							GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}
	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is ok, copying the data back to user. */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif


/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return mask;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;


	if (cl == &dev->iamthif_cl) {
		mutex_unlock(&dev->device_lock);
		poll_wait(file, &dev->iamthif_cl.wait, wait);
		mutex_lock(&dev->device_lock);
		if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
			dev->iamthif_file_object == file) {
			mask |= (POLLIN | POLLRDNORM);
			dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
			mei_amthif_run_next_cmd(dev);
		}
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	poll_wait(file, &cl->tx_wait, wait);
	mutex_lock(&dev->device_lock);
	if (MEI_WRITE_COMPLETE == cl->writing_state)
		mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}

/*
 * file operations structure that will be used for the mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};


/*
 * Misc Device Struct
 */
static struct miscdevice  mei_misc_device = {
		.name = "mei",
		.fops = &mei_fops,
		.minor = MISC_DYNAMIC_MINOR,
};

/**
 * mei_quirk_probe - probe for devices that do not have a valid ME interface
 * @pdev: PCI device structure
 * @ent: entry into pci_device_table
 *
 * returns true if ME Interface is valid, false otherwise
 */
static bool __devinit mei_quirk_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	u32 reg;
	if (ent->device == MEI_DEV_ID_PBG_1) {
		pci_read_config_dword(pdev, 0x48, &reg);
		/* make sure that bit 9 is up and bit 10 is down */
		if ((reg & 0x600) == 0x200) {
			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
			return false;
		}
	}
	return true;
}
/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);

	if (!mei_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	if (mei_pdev) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping  IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	 /* request and enable interrupt */
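	/* an MSI vector is exclusive, so no primary handler is needed;
	 * a legacy interrupt line may be shared with other devices */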
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}
	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_pdev = pdev;
	pci_set_drvdata(pdev, dev);


	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void __devexit mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	if (mei_pdev != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	mei_wd_stop(dev);

	mei_pdev = NULL;

	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
	mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	misc_deregister(&mei_misc_device);
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	/* Stop the watchdog if it exists */
	err = mei_wd_stop(dev);
	/* Set new mei state */
	if (dev->dev_state == MEI_DEV_ENABLED ||
	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
		dev->dev_state = MEI_DEV_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
				pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->dev_state = MEI_DEV_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}
static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS	(&mei_pm_ops)
#else
#define MEI_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 *  PCI driver structure
 */
static struct pci_driver mei_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = __devexit_p(mei_remove),
	.shutdown = __devexit_p(mei_remove),
	.driver.pm = MEI_PM_OPS,
};

module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");