/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	list_for_each_entry(me_cl, &dev->me_clients, list)
		if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
			return me_cl;

	return NULL;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	struct mei_me_client *me_cl;

	list_for_each_entry(me_cl, &dev->me_clients, list)
		if (me_cl->client_id == client_id)
			return me_cl;
	return NULL;
}

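/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns me client or NULL if not found
 */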
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	list_for_each_entry(me_cl, &dev->me_clients, list)
		if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 &&
		    me_cl->client_id == client_id)
			return me_cl;
	return NULL;
}

/**
 * mei_me_cl_remove - remove me client matching uuid and client_id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client address
 */
void mei_me_cl_remove(struct mei_device *dev, const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl, *next;

	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) {
		if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 &&
		    me_cl->client_id == client_id) {
			list_del(&me_cl->list);
			kfree(me_cl);
			break;
		}
	}
}

/**
 * mei_cl_cmp_id - tells if the clients are the same
 *
 * @cl1: host client 1
 * @cl2: host client 2
 *
 * returns true  - if the clients have same host and me ids
 *         false - otherwise
 */
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
				const struct mei_cl *cl2)
{
	return cl1 && cl2 &&
		(cl1->host_client_id == cl2->host_client_id) &&
		(cl1->me_client_id == cl2->me_client_id);
}

/**
 * __mei_io_list_flush - removes and optionally frees cbs belonging to cl.
 *
 * @list:  an instance of our list structure
 * @cl:    host client, can be NULL for flushing the whole list
 * @free:  whether to free the cbs
 */
static void __mei_io_list_flush(struct mei_cl_cb *list,
				struct mei_cl *cl, bool free)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *next;

	/* enable removing everything if no cl is specified */
	list_for_each_entry_safe(cb, next, &list->list, list) {
		if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
			list_del(&cb->list);
			if (free)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_list_flush - removes list entry belonging to cl.
 *
 * @list:  An instance of our list structure
 * @cl: host client
 */
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, false);
}


/**
 * mei_io_list_free - removes cb belonging to cl and free them
 *
 * @list:  An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, true);
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	kfree(cb->request_buffer.data);
	kfree(cb->response_buffer.data);
	kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * returns mei_cl_cb pointer or NULL;
 */
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	mei_io_list_init(cb);

	cb->file_object = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	return cb;
}

/**
 * mei_io_cb_alloc_req_buf - allocate request buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->request_buffer.data)
		return -ENOMEM;
	cb->request_buffer.size = length;
	return 0;
}
/**
 * mei_io_cb_alloc_resp_buf - allocate response buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->response_buffer.data)
		return -ENOMEM;
	cb->response_buffer.size = length;
	return 0;
}
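
/*
 * Usage sketch (illustrative only, not part of the driver): a read callback
 * is typically built from mei_io_cb_init() and mei_io_cb_alloc_resp_buf()
 * and released with mei_io_cb_free() on error; cl, fp and length are
 * assumed to be provided by the caller:
 *
 *	cb = mei_io_cb_init(cl, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	rets = mei_io_cb_alloc_resp_buf(cb, length);
 *	if (rets) {
 *		mei_io_cb_free(cb);
 *		return rets;
 *	}
 */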

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 */
int mei_cl_flush_queues(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_list_flush(&cl->dev->read_list, cl);
	mei_io_list_free(&cl->dev->write_list, cl);
	mei_io_list_free(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
	return 0;
}


/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	INIT_LIST_HEAD(&cl->link);
	INIT_LIST_HEAD(&cl->device_link);
	cl->reading_state = MEI_IDLE;
	cl->writing_state = MEI_IDLE;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * returns the allocated host client structure or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_find_read_cb - find this cl's callback in the read list
 *
 * @cl: host client
 *
 * returns cb on success, NULL on error
 */
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, cb->cl))
			return cb;
	return NULL;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
 *
 * returns 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;
	long open_handle_count;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one */
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					MEI_CLIENTS_MAX);

	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}
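
/*
 * Usage sketch (illustrative only, not part of the driver): a host client
 * is typically allocated and then linked into the host map while holding
 * dev->device_lock, e.g.:
 *
 *	cl = mei_cl_allocate(dev);
 *	if (!cl)
 *		return -ENOMEM;
 *	mutex_lock(&dev->device_lock);
 *	ret = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
 *	mutex_unlock(&dev->device_lock);
 *	if (ret) {
 *		kfree(cl);
 *		return ret;
 *	}
 */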

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* wd and amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_INITIALIZING;

	return 0;
}


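/**
 * mei_host_client_init - initialize the amthif, wd and nfc host clients
 *	from the me client list; runs from dev->init_work
 *
 * @work: work item embedded in the mei device structure
 */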
void mei_host_client_init(struct work_struct *work)
{
	struct mei_device *dev = container_of(work,
					      struct mei_device, init_work);
	struct mei_me_client *me_cl;
	struct mei_client_properties *props;

	mutex_lock(&dev->device_lock);

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		props = &me_cl->props;

		if (!uuid_le_cmp(props->protocol_name, mei_amthif_guid))
			mei_amthif_host_init(dev);
		else if (!uuid_le_cmp(props->protocol_name, mei_wd_guid))
			mei_wd_host_init(dev);
		else if (!uuid_le_cmp(props->protocol_name, mei_nfc_guid))
			mei_nfc_host_init(dev);

	}

	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	mutex_unlock(&dev->device_lock);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_runtime_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * returns true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    dev->pg_event == MEI_PG_EVENT_WAIT) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto free;
	}

	cb->fop_type = MEI_FOP_DISCONNECT;

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			cl_err(dev, cl, "failed to disconnect.\n");
			goto free;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

	}
	mutex_unlock(&dev->device_lock);

	wait_event_timeout(cl->wait,
			MEI_FILE_DISCONNECTED == cl->state,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);

	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
	} else {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}


/**
 * mei_cl_is_other_connecting - checks if other
 *    client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * returns true if other client is connected, false - otherwise.
 */
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl *ocl; /* the other client */

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;

	list_for_each_entry(ocl, &dev->file_list, link) {
		if (ocl->state == MEI_FILE_CONNECTING &&
		    ocl != cl &&
		    cl->me_client_id == ocl->me_client_id)
			return true;

	}

	return false;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_CONNECT;

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		cl->state = MEI_FILE_CONNECTING;
		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		cl->state = MEI_FILE_INITIALIZING;
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTED;
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
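
/*
 * Usage sketch (illustrative only, not part of the driver): connecting a
 * host client to an me client, assuming the caller holds dev->device_lock
 * and has already set cl->me_client_id and cl->cl_uuid from an me client
 * lookup; file may be NULL when no character-device file backs the client:
 *
 *	rets = mei_cl_connect(cl, file);
 *	if (rets)
 *		return rets;
 *	...
 *	cl->state = MEI_FILE_DISCONNECTING;
 *	mei_cl_disconnect(cl);
 */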

/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
 *	-ENOENT if mei_cl is not present
 *	-EINVAL if single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	if (cl->mei_flow_ctrl_creds > 0)
		return 1;

	me_cl = mei_me_cl_by_id(dev, cl->me_client_id);
	if (!me_cl) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOENT;
	}

	if (me_cl->mei_flow_ctrl_creds) {
		if (WARN_ON(me_cl->props.single_recv_buf == 0))
			return -EINVAL;
		return 1;
	}
	return 0;
}
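
/*
 * Note on the credit scheme used by the two helpers around this comment:
 * a client may write one complete message per flow-control credit granted
 * by the firmware. Clients whose properties set single_recv_buf share one
 * credit pool on the me client (me_cl->mei_flow_ctrl_creds); all others
 * are accounted per host client (cl->mei_flow_ctrl_creds). A credit is
 * consumed by mei_cl_flow_ctrl_reduce() once a complete message has been
 * written.
 */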

/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * returns
 *	0 on success
 *	-ENOENT when me client is not found
 *	-EINVAL when ctrl credits are <= 0
 */
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	me_cl = mei_me_cl_by_id(dev, cl->me_client_id);
	if (!me_cl) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOENT;
	}

	if (me_cl->props.single_recv_buf) {
		if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		me_cl->mei_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->mei_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_read_start - start the next read from the me client.
 *
 * @cl: host client
 * @length: number of bytes to read
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	struct mei_me_client *me_cl;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->read_cb) {
		cl_dbg(dev, cl, "read is pending.\n");
		return -EBUSY;
	}
	me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
	if (!me_cl) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return  -ENOTTY;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* always allocate at least client max message */
	length = max_t(size_t, length, me_cl->props.max_msg_length);
	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto out;

	cb->fop_type = MEI_FOP_READ;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	cl->read_cb = cb;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	if (rets)
		mei_io_cb_free(cb);

	return rets;
}

/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t len;
	u32 msg_slots;
	int slots;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->request_buffer;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		return rets;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	slots = mei_hbuf_empty_slots(dev);
	len = buf->size - cb->buf_idx;
	msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.internal = cb->internal;

	if (slots >= msg_slots) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (slots == dev->hbuf_depth) {
		msg_slots = slots;
		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
			cb->request_buffer.size, cb->buf_idx);

	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
	if (rets) {
		cl->status = rets;
		list_move_tail(&cb->list, &cmpl_list->list);
		return rets;
	}

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += mei_hdr.length;

	if (mei_hdr.msg_complete) {
		if (mei_cl_flow_ctrl_reduce(cl))
			return -EIO;
		list_move_tail(&cb->list, &dev->write_waiting_list.list);
	}

	return 0;
}

/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: block until the write completes
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;


	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb->fop_type = MEI_FOP_WRITE;
	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf->size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf->size;
		goto out;
	}

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		rets = mei_cl_flow_ctrl_reduce(cl);
		if (rets < 0)
			goto err;

		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&cb->list, &dev->write_list.list);
	}


	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE);
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
	}

	rets = buf->size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}
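
/*
 * Usage sketch (illustrative only, not part of the driver): a blocking
 * write through mei_cl_write(), assuming the caller holds dev->device_lock
 * and provides file, data and length; a completed write cb is freed from
 * mei_cl_complete():
 *
 *	cb = mei_io_cb_init(cl, file);
 *	if (!cb)
 *		return -ENOMEM;
 *	rets = mei_io_cb_alloc_req_buf(cb, length);
 *	if (rets) {
 *		mei_io_cb_free(cb);
 *		return rets;
 *	}
 *	memcpy(cb->request_buffer.data, data, length);
 *	rets = mei_cl_write(cl, cb, true);
 */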


/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	if (cb->fop_type == MEI_FOP_WRITE) {
		mei_io_cb_free(cb);
		cb = NULL;
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait))
			wake_up_interruptible(&cl->tx_wait);

	} else if (cb->fop_type == MEI_FOP_READ &&
			MEI_READING == cl->reading_state) {
		cl->reading_state = MEI_READ_COMPLETE;
		if (waitqueue_active(&cl->rx_wait))
			wake_up_interruptible(&cl->rx_wait);
		else
			mei_cl_bus_rx_event(cl);

	}
}


/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link) {
		cl->state = MEI_FILE_DISCONNECTED;
		cl->mei_flow_ctrl_creds = 0;
		cl->timer_count = 0;
	}
}


/**
 * mei_cl_all_wakeup - wake up all readers and writers so they can be interrupted
 *
 * @dev: mei device
 */
void mei_cl_all_wakeup(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link) {
		if (waitqueue_active(&cl->rx_wait)) {
			cl_dbg(dev, cl, "Waking up reading client!\n");
			wake_up_interruptible(&cl->rx_wait);
		}
		if (waitqueue_active(&cl->tx_wait)) {
			cl_dbg(dev, cl, "Waking up writing client!\n");
			wake_up_interruptible(&cl->tx_wait);
		}
	}
}

/**
 * mei_cl_all_write_clear - clear all pending writes
 *
 * @dev: mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
	mei_io_list_free(&dev->write_list, NULL);
	mei_io_list_free(&dev->write_waiting_list, NULL);
}