/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2018, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched/signal.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl *cl;

	int err;

	dev = container_of(inode->i_cdev, struct mei_device, cdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(dev->dev, "dev_state != MEI_ENABLED  dev_state = %s\n",
		    mei_dev_state_str(dev->dev_state));
		err = -ENODEV;
		goto err_unlock;
	}

	cl = mei_cl_alloc_linked(dev);
	if (IS_ERR(cl)) {
		err = PTR_ERR(cl);
		goto err_unlock;
	}

	cl->fp = file;
	file->private_data = cl;

	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

err_unlock:
	mutex_unlock(&dev->device_lock);
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	rets = mei_cl_disconnect(cl);

	mei_cl_flush_queues(cl, file);
	cl_dbg(dev, cl, "removing\n");

	mei_cl_unlink(cl);

	file->private_data = NULL;

	kfree(cl);

	mutex_unlock(&dev->device_lock);
	return rets;
}


/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	struct mei_cl_cb *cb = NULL;
	bool nonblock = !!(file->f_flags & O_NONBLOCK);
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;


	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (ubuf == NULL) {
		rets = -EMSGSIZE;
		goto out;
	}

	cb = mei_cl_read_cb(cl, file);
	if (cb)
		goto copy_buffer;

	if (*offset > 0)
		*offset = 0;

	rets = mei_cl_read_start(cl, length, file);
	if (rets && rets != -EBUSY) {
		cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets);
		goto out;
	}

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	if (wait_event_interruptible(cl->rx_wait,
				     !list_empty(&cl->rd_completed) ||
				     !mei_cl_is_connected(cl))) {
		if (signal_pending(current))
			return -EINTR;
		return -ERESTARTSYS;
	}
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	cb = mei_cl_read_cb(cl, file);
	if (!cb) {
		rets = 0;
		goto out;
	}

copy_buffer:
	/* now copy the data to user space */
	if (cb->status) {
		rets = cb->status;
		cl_dbg(dev, cl, "read operation failed %zd\n", rets);
		goto free;
	}

	cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
	       cb->buf.size, cb->buf_idx, *offset);
	if (*offset >= cb->buf_idx) {
		rets = 0;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
		dev_dbg(dev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	/* not all data was read, keep the cb */
	if (*offset < cb->buf_idx)
		goto out;

free:
	mei_io_cb_free(cb);
	*offset = 0;

out:
	cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		cl_err(dev, cl, "is not connected");
		rets = -ENODEV;
		goto out;
	}

	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	while (cl->tx_cb_queued >= dev->tx_queue_limit) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	rets = copy_from_user(cb->buf.data, ubuf, length);
	if (rets) {
		dev_dbg(dev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		mei_io_cb_free(cb);
		goto out;
	}

	rets = mei_cl_write(cl, cb);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_ioctl_connect_client - the connect to fw client IOCTL function
 *
 * @file: private data of the file object
 * @data: IOCTL connect data, input and output parameters
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_client(struct file *file,
			struct mei_connect_client_data *data)
{
	struct mei_device *dev;
	struct mei_client *client;
	struct mei_me_client *me_cl;
	struct mei_cl *cl;
	int rets;

	cl = file->private_data;
	dev = cl->dev;

	if (dev->dev_state != MEI_DEV_ENABLED)
		return -ENODEV;

	if (cl->state != MEI_FILE_INITIALIZING &&
	    cl->state != MEI_FILE_DISCONNECTED)
		return -EBUSY;

	/* find ME client we're trying to connect to */
	me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
	if (!me_cl) {
		dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
			&data->in_client_uuid);
		rets = -ENOTTY;
		goto end;
	}

	if (me_cl->props.fixed_address) {
		bool forbidden = dev->override_fixed_address ?
			 !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
		if (forbidden) {
			dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
				&data->in_client_uuid);
			rets = -ENOTTY;
			goto end;
		}
	}

	dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
			me_cl->client_id);
	dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
			me_cl->props.protocol_version);
	dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
			me_cl->props.max_msg_length);

	/* prepare the output buffer */
	client = &data->out_client_properties;
	client->max_msg_length = me_cl->props.max_msg_length;
	client->protocol_version = me_cl->props.protocol_version;
	dev_dbg(dev->dev, "Can connect?\n");

	rets = mei_cl_connect(cl, me_cl, file);

end:
	mei_me_cl_put(me_cl);
	return rets;
}

/**
 * mei_ioctl_client_notify_request -
 *     propagate event notification request to client
 *
 * @file: pointer to file structure
 * @request: 0 - disable, 1 - enable
 *
 * Return: 0 on success, <0 on error
 */
static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
{
	struct mei_cl *cl = file->private_data;

	if (request != MEI_HBM_NOTIFICATION_START &&
	    request != MEI_HBM_NOTIFICATION_STOP)
		return -EINVAL;

	return mei_cl_notify_request(cl, file, (u8)request);
}

/**
 * mei_ioctl_client_notify_get - wait for notification request
 *
 * @file: pointer to file structure
 * @notify_get: where to return the notification state:
 *              0 - no event pending, 1 - event pending
 *
 * Return: 0 on success, <0 on error
 */
static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
{
	struct mei_cl *cl = file->private_data;
	bool notify_ev;
	bool block = (file->f_flags & O_NONBLOCK) == 0;
	int rets;

	rets = mei_cl_notify_get(cl, block, &notify_ev);
	if (rets)
		return rets;

	*notify_get = notify_ev ? 1 : 0;
	return 0;
}

/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data connect_data;
	u32 notify_get, notify_req;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	switch (cmd) {
	case IOCTL_MEI_CONNECT_CLIENT:
		dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
		if (copy_from_user(&connect_data, (char __user *)data,
				sizeof(struct mei_connect_client_data))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}

		rets = mei_ioctl_connect_client(file, &connect_data);
		if (rets)
			goto out;

		/* if all is ok, copying the data back to user. */
		if (copy_to_user((char __user *)data, &connect_data,
				sizeof(struct mei_connect_client_data))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}

		break;

	case IOCTL_MEI_NOTIFY_SET:
		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n");
		if (copy_from_user(&notify_req,
				   (char __user *)data, sizeof(notify_req))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}
		rets = mei_ioctl_client_notify_request(file, notify_req);
		break;

	case IOCTL_MEI_NOTIFY_GET:
		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n");
		rets = mei_ioctl_client_notify_get(file, &notify_get);
		if (rets)
			goto out;

		dev_dbg(dev->dev, "copy notification state to user\n");
		if (copy_to_user((char __user *)data,
				&notify_get, sizeof(notify_get))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}
		break;

	default:
		rets = -ENOIOCTLCMD;
	}

out:
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif


/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * Return: poll mask
 */
static __poll_t mei_poll(struct file *file, poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	__poll_t mask = 0;
	bool notify_en;

	if (WARN_ON(!cl || !cl->dev))
		return EPOLLERR;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	notify_en = cl->notify_en && (req_events & EPOLLPRI);

	if (dev->dev_state != MEI_DEV_ENABLED ||
	    !mei_cl_is_connected(cl)) {
		mask = EPOLLERR;
		goto out;
	}

	if (notify_en) {
		poll_wait(file, &cl->ev_wait, wait);
		if (cl->notify_ev)
			mask |= EPOLLPRI;
	}

	if (req_events & (EPOLLIN | EPOLLRDNORM)) {
		poll_wait(file, &cl->rx_wait, wait);

		if (!list_empty(&cl->rd_completed))
			mask |= EPOLLIN | EPOLLRDNORM;
		else
			mei_cl_read_start(cl, mei_cl_mtu(cl), file);
	}

	if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
		poll_wait(file, &cl->tx_wait, wait);
		if (cl->tx_cb_queued < dev->tx_queue_limit)
			mask |= EPOLLOUT | EPOLLWRNORM;
	}

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}

/**
 * mei_cl_is_write_queued - check if the client has pending writes.
 *
 * @cl: writing host client
 *
 * Return: true if client is writing, false otherwise.
 */
static bool mei_cl_is_write_queued(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &dev->write_list, list)
		if (cb->cl == cl)
			return true;
	list_for_each_entry(cb, &dev->write_waiting_list, list)
		if (cb->cl == cl)
			return true;
	return false;
}

/**
 * mei_fsync - the fsync handler
 *
 * @fp:       pointer to file structure
 * @start:    unused
 * @end:      unused
 * @datasync: unused
 *
 * Return: 0 on success, -ENODEV if client is not connected
 */
static int mei_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
{
	struct mei_cl *cl = fp->private_data;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	while (mei_cl_is_write_queued(cl)) {
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				!mei_cl_is_connected(cl));
		mutex_lock(&dev->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}
	rets = 0;
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_fasync - asynchronous io support
 *
 * @fd: file descriptor
 * @file: pointer to file structure
 * @band: band bitmap
 *
 * Return: negative on error,
 *         0 if it did no changes,
 *         and positive if a process was added or deleted
 */
static int mei_fasync(int fd, struct file *file, int band)
{

	struct mei_cl *cl = file->private_data;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	return fasync_helper(fd, file, band, &cl->ev_async);
}

/**
 * fw_status_show - mei device fw_status attribute show method
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf:  char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t fw_status_show(struct device *device,
		struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	struct mei_fw_status fw_status;
	int err, i;
	ssize_t cnt = 0;

	mutex_lock(&dev->device_lock);
	err = mei_fw_status(dev, &fw_status);
	mutex_unlock(&dev->device_lock);
	if (err) {
		dev_err(device, "read fw_status error = %d\n", err);
		return err;
	}

	for (i = 0; i < fw_status.count; i++)
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
				fw_status.status[i]);
	return cnt;
}
static DEVICE_ATTR_RO(fw_status);

/**
 * hbm_ver_show - display HBM protocol version negotiated with FW
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf:  char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t hbm_ver_show(struct device *device,
			    struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	struct hbm_version ver;

	mutex_lock(&dev->device_lock);
	ver = dev->version;
	mutex_unlock(&dev->device_lock);

	return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
}
static DEVICE_ATTR_RO(hbm_ver);

/**
 * hbm_ver_drv_show - display HBM protocol version advertised by driver
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf:  char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t hbm_ver_drv_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
}
static DEVICE_ATTR_RO(hbm_ver_drv);

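/**
 * tx_queue_limit_show - display the per-client tx queue limit
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf:  char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */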
static ssize_t tx_queue_limit_show(struct device *device,
				   struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	u8 size = 0;

	mutex_lock(&dev->device_lock);
	size = dev->tx_queue_limit;
	mutex_unlock(&dev->device_lock);

	return snprintf(buf, PAGE_SIZE, "%u\n", size);
}

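/**
 * tx_queue_limit_store - set the per-client tx queue limit
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf:  char in buffer
 * @count: buffer size
 *
 * Return: number of bytes consumed on success, <0 on error
 */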
static ssize_t tx_queue_limit_store(struct device *device,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct mei_device *dev = dev_get_drvdata(device);
	u8 limit;
	unsigned int inp;
	int err;

	err = kstrtouint(buf, 10, &inp);
	if (err)
		return err;
	if (inp > MEI_TX_QUEUE_LIMIT_MAX || inp < MEI_TX_QUEUE_LIMIT_MIN)
		return -EINVAL;
	limit = inp;

	mutex_lock(&dev->device_lock);
	dev->tx_queue_limit = limit;
	mutex_unlock(&dev->device_lock);

	return count;
}
static DEVICE_ATTR_RW(tx_queue_limit);

/**
 * fw_ver_show - display ME FW version
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf:  char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t fw_ver_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	struct mei_fw_version *ver;
	ssize_t cnt = 0;
	int i;

	ver = dev->fw_ver;

	for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++)
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n",
				 ver[i].platform, ver[i].major, ver[i].minor,
				 ver[i].hotfix, ver[i].buildno);
	return cnt;
}
static DEVICE_ATTR_RO(fw_ver);

static struct attribute *mei_attrs[] = {
	&dev_attr_fw_status.attr,
	&dev_attr_hbm_ver.attr,
	&dev_attr_hbm_ver_drv.attr,
	&dev_attr_tx_queue_limit.attr,
	&dev_attr_fw_ver.attr,
	NULL
};
ATTRIBUTE_GROUPS(mei);

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.fsync = mei_fsync,
	.fasync = mei_fasync,
	.llseek = no_llseek
};

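/* char device infrastructure: device class, dev_t range and minor number idr */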
static struct class *mei_class;
static dev_t mei_devt;
#define MEI_MAX_DEVS  MINORMASK
static DEFINE_MUTEX(mei_minor_lock);
static DEFINE_IDR(mei_idr);

/**
 * mei_minor_get - obtain next free device minor number
 *
 * @dev:  device pointer
 *
 * Return: allocated minor, or -ENOSPC if no free minor left
 */
static int mei_minor_get(struct mei_device *dev)
{
	int ret;

	mutex_lock(&mei_minor_lock);
	ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
	if (ret >= 0)
		dev->minor = ret;
	else if (ret == -ENOSPC)
		dev_err(dev->dev, "too many mei devices\n");

	mutex_unlock(&mei_minor_lock);
	return ret;
}

/**
 * mei_minor_free - mark device minor number as free
 *
 * @dev:  device pointer
 */
static void mei_minor_free(struct mei_device *dev)
{
	mutex_lock(&mei_minor_lock);
	idr_remove(&mei_idr, dev->minor);
	mutex_unlock(&mei_minor_lock);
}

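/**
 * mei_register - register the mei char device and sysfs attributes
 *
 * @dev: mei device structure
 * @parent: parent device
 *
 * Return: 0 on success, <0 on error
 */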
int mei_register(struct mei_device *dev, struct device *parent)
{
	struct device *clsdev; /* class device */
	int ret, devno;

	ret = mei_minor_get(dev);
	if (ret < 0)
		return ret;

	/* Fill in the data structures */
	devno = MKDEV(MAJOR(mei_devt), dev->minor);
	cdev_init(&dev->cdev, &mei_fops);
	dev->cdev.owner = parent->driver->owner;

	/* Add the device */
	ret = cdev_add(&dev->cdev, devno, 1);
	if (ret) {
		dev_err(parent, "unable to add device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		goto err_dev_add;
	}

	clsdev = device_create_with_groups(mei_class, parent, devno,
					   dev, mei_groups,
					   "mei%d", dev->minor);

	if (IS_ERR(clsdev)) {
		dev_err(parent, "unable to create device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		ret = PTR_ERR(clsdev);
		goto err_dev_create;
	}

	ret = mei_dbgfs_register(dev, dev_name(clsdev));
	if (ret) {
		dev_err(clsdev, "cannot register debugfs ret = %d\n", ret);
		goto err_dev_dbgfs;
	}

	return 0;

err_dev_dbgfs:
	device_destroy(mei_class, devno);
err_dev_create:
	cdev_del(&dev->cdev);
err_dev_add:
	mei_minor_free(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_register);

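/**
 * mei_deregister - unregister the mei char device
 *
 * @dev: mei device structure
 */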
void mei_deregister(struct mei_device *dev)
{
	int devno;

	devno = dev->cdev.dev;
	cdev_del(&dev->cdev);

	mei_dbgfs_deregister(dev);

	device_destroy(mei_class, devno);

	mei_minor_free(dev);
}
EXPORT_SYMBOL_GPL(mei_deregister);

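/**
 * mei_init - module entry point: create the mei class, char device region
 *            and the mei client bus
 *
 * Return: 0 on success, <0 on error
 */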
static int __init mei_init(void)
{
	int ret;

	mei_class = class_create(THIS_MODULE, "mei");
	if (IS_ERR(mei_class)) {
		pr_err("couldn't create class\n");
		ret = PTR_ERR(mei_class);
		goto err;
	}

	ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
	if (ret < 0) {
		pr_err("unable to allocate char dev region\n");
		goto err_class;
	}

	ret = mei_cl_bus_init();
	if (ret < 0) {
		pr_err("unable to initialize bus\n");
		goto err_chrdev;
	}

	return 0;

err_chrdev:
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
err_class:
	class_destroy(mei_class);
err:
	return ret;
}

static void __exit mei_exit(void)
{
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
	class_destroy(mei_class);
	mei_cl_bus_exit();
}

module_init(mei_init);
module_exit(mei_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");