/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_by_uuid - locate index of me client
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns me client index or -ENOENT if not found
 */
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
{
	int i;

	for (i = 0; i < dev->me_clients_num; ++i)
		if (uuid_le_cmp(*uuid,
				dev->me_clients[i].props.protocol_name) == 0)
			return i;

	return -ENOENT;
}


/**
 * mei_me_cl_by_id - return index to me_clients for client_id
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns index on success, -ENOENT on failure.
 */

int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	int i;

	for (i = 0; i < dev->me_clients_num; i++)
		if (dev->me_clients[i].client_id == client_id)
			return i;

	return -ENOENT;
}


/**
 * mei_cl_cmp_id - tells if the clients are the same
 *
 * @cl1: host client 1
 * @cl2: host client 2
 *
 * returns true  - if the clients have the same host and me ids
 *         false - otherwise
 */
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
				const struct mei_cl *cl2)
{
	return cl1 && cl2 &&
		(cl1->host_client_id == cl2->host_client_id) &&
		(cl1->me_client_id == cl2->me_client_id);
}

/**
 * __mei_io_list_flush - removes cbs belonging to cl.
 *
 * @list:  an instance of our list structure
 * @cl:    host client, can be NULL for flushing the whole list
 * @free:  whether to free the cbs
 */
static void __mei_io_list_flush(struct mei_cl_cb *list,
				struct mei_cl *cl, bool free)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *next;

	/* enable removing everything if no cl is specified */
	list_for_each_entry_safe(cb, next, &list->list, list) {
		if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
			list_del(&cb->list);
			if (free)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_list_flush - removes list entry belonging to cl.
 *
 * @list:  An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, false);
}


/**
 * mei_io_list_free - removes cb belonging to cl and free them
 *
 * @list:  An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, true);
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	kfree(cb->request_buffer.data);
	kfree(cb->response_buffer.data);
	kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * returns mei_cl_cb pointer or NULL;
 */
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	mei_io_list_init(cb);

	cb->file_object = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	return cb;
}

/**
 * mei_io_cb_alloc_req_buf - allocate request buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->request_buffer.data)
		return -ENOMEM;
	cb->request_buffer.size = length;
	return 0;
}
/**
 * mei_io_cb_alloc_resp_buf - allocate response buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->response_buffer.data)
		return -ENOMEM;
	cb->response_buffer.size = length;
	return 0;
}



/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 */
int mei_cl_flush_queues(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_list_flush(&cl->dev->read_list, cl);
	mei_io_list_free(&cl->dev->write_list, cl);
	mei_io_list_free(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
	return 0;
}


/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	INIT_LIST_HEAD(&cl->link);
	INIT_LIST_HEAD(&cl->device_link);
	cl->reading_state = MEI_IDLE;
	cl->writing_state = MEI_IDLE;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl  structure and sets it up.
 *
 * @dev: mei device
 * returns the allocated host client structure or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_find_read_cb - find this cl's callback in the read list
 *
 * @cl: host client
 *
 * returns cb on success, NULL on error
 */
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, cb->cl))
			return cb;
	return NULL;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for a generic one
 *
 * returns 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if the host client map or the open handle count is exhausted
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;
	long open_handle_count;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one */
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					MEI_CLIENTS_MAX);

	if (id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

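	/* the open handle limit covers regular clients and amthif together */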
	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* wd and amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_INITIALIZING;

	return 0;
}


void mei_host_client_init(struct work_struct *work)
{
	struct mei_device *dev = container_of(work,
					      struct mei_device, init_work);
	struct mei_client_properties *client_props;
	int i;

	mutex_lock(&dev->device_lock);

	for (i = 0; i < dev->me_clients_num; i++) {
		client_props = &dev->me_clients[i].props;

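		/* start the protocol specific host clients matched by GUID */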
		if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
			mei_amthif_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
			mei_wd_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid))
			mei_nfc_host_init(dev);

	}

	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	mutex_unlock(&dev->device_lock);

	pm_runtime_mark_last_busy(&dev->pdev->dev);
	dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n");
	pm_runtime_autosuspend(&dev->pdev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * returns true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    dev->pg_event == MEI_PG_EVENT_WAIT) {
		dev_dbg(&dev->pdev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets, err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto free;
	}

	cb->fop_type = MEI_FOP_CLOSE;
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			cl_err(dev, cl, "failed to disconnect.\n");
			goto free;
		}
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

	}
	mutex_unlock(&dev->device_lock);

	err = wait_event_timeout(dev->wait_recvd_msg,
			MEI_FILE_DISCONNECTED == cl->state,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);
	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
	} else {
		rets = -ENODEV;
		if (MEI_FILE_DISCONNECTED != cl->state)
			cl_err(dev, cl, "wrong status client disconnect.\n");

		if (err)
			cl_dbg(dev, cl, "wait failed disconnect err=%d\n", err);

		cl_err(dev, cl, "failed to disconnect from FW client.\n");
	}

	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	mei_io_cb_free(cb);
	return rets;
}


/**
 * mei_cl_is_other_connecting - checks if other
 *    client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * returns true if other client is connected, false - otherwise.
 */
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl *ocl; /* the other client */

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;

	list_for_each_entry(ocl, &dev->file_list, link) {
		if (ocl->state == MEI_FILE_CONNECTING &&
		    ocl != cl &&
		    cl->me_client_id == ocl->me_client_id)
			return true;

	}

	return false;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_CONNECT;

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_recvd_msg,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
 *	-ENOENT if mei_cl is not present
 *	-EINVAL if single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	if (!dev->me_clients_num)
		return 0;

	if (cl->mei_flow_ctrl_creds > 0)
		return 1;

	id = mei_me_cl_by_id(dev, cl->me_client_id);
	if (id < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return id;
	}

	me_cl = &dev->me_clients[id];
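	/* a single receive buffer me client keeps credits on the me client side */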
	if (me_cl->mei_flow_ctrl_creds) {
		if (WARN_ON(me_cl->props.single_recv_buf == 0))
			return -EINVAL;
		return 1;
	}
	return 0;
}

/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * @returns
 *	0 on success
 *	-ENOENT when me client is not found
 *	-EINVAL when ctrl credits are <= 0
 */
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = mei_me_cl_by_id(dev, cl->me_client_id);
	if (id < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return id;
	}

	me_cl = &dev->me_clients[id];
	if (me_cl->props.single_recv_buf != 0) {
		if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		me_cl->mei_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->mei_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_read_start - sets up a read request for the client
 *
 * @cl: host client
 * @length: number of bytes to read
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->read_cb) {
		cl_dbg(dev, cl, "read is pending.\n");
		return -EBUSY;
	}
	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return  -ENOTTY;
	}

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* always allocate at least client max message */
	length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto out;

	cb->fop_type = MEI_FOP_READ;
	if (mei_hbuf_acquire(dev)) {
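		/* grant the me client a credit so it can send the read data */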
		if (mei_hbm_cl_flow_control_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}

		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	cl->read_cb = cb;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	if (rets)
		mei_io_cb_free(cb);

	return rets;
}

/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t len;
	u32 msg_slots;
	int slots;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->request_buffer;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		return rets;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

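	/* free host buffer slots vs. slots needed for the remaining data */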
	slots = mei_hbuf_empty_slots(dev);
	len = buf->size - cb->buf_idx;
	msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.internal = cb->internal;

	if (slots >= msg_slots) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (slots == dev->hbuf_depth) {
		msg_slots = slots;
		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
			cb->request_buffer.size, cb->buf_idx);

	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
	if (rets) {
		cl->status = rets;
		list_move_tail(&cb->list, &cmpl_list->list);
		return rets;
	}

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += mei_hdr.length;

	if (mei_hdr.msg_complete) {
		if (mei_cl_flow_ctrl_reduce(cl))
			return -EIO;
		list_move_tail(&cb->list, &dev->write_waiting_list.list);
	}

	return 0;
}

/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: block until the write is completed
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;


	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb->fop_type = MEI_FOP_WRITE;
	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf->size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf->size;
		goto out;
	}

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		rets = mei_cl_flow_ctrl_reduce(cl);
		if (rets < 0)
			goto err;

		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&cb->list, &dev->write_list.list);
	}


	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE);
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
	}

	rets = buf->size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	return rets;
}


/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	if (cb->fop_type == MEI_FOP_WRITE) {
		mei_io_cb_free(cb);
		cb = NULL;
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait))
			wake_up_interruptible(&cl->tx_wait);

	} else if (cb->fop_type == MEI_FOP_READ &&
			MEI_READING == cl->reading_state) {
		cl->reading_state = MEI_READ_COMPLETE;
		if (waitqueue_active(&cl->rx_wait))
			wake_up_interruptible(&cl->rx_wait);
		else
			mei_cl_bus_rx_event(cl);

	}
}


/**
 * mei_cl_all_disconnect - forcefully disconnect all connected clients
 *
 * @dev - mei device
 */

void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link) {
		cl->state = MEI_FILE_DISCONNECTED;
		cl->mei_flow_ctrl_creds = 0;
		cl->timer_count = 0;
	}
}


/**
 * mei_cl_all_wakeup - wake up all readers and writers so they can be interrupted
 *
 * @dev  - mei device
 */
void mei_cl_all_wakeup(struct mei_device *dev)
{
	struct mei_cl *cl;
	list_for_each_entry(cl, &dev->file_list, link) {
		if (waitqueue_active(&cl->rx_wait)) {
			cl_dbg(dev, cl, "Waking up reading client!\n");
			wake_up_interruptible(&cl->rx_wait);
		}
		if (waitqueue_active(&cl->tx_wait)) {
			cl_dbg(dev, cl, "Waking up writing client!\n");
			wake_up_interruptible(&cl->tx_wait);
		}
	}
}

/**
 * mei_cl_all_write_clear - clear all pending writes
 *
 * @dev - mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
	mei_io_list_free(&dev->write_list, NULL);
	mei_io_list_free(&dev->write_waiting_list, NULL);
}