// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
#define dev_fmt pr_fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(unsigned long);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);

#ifdef DEBUG
static void ipmi_debug_msg(const char *title, unsigned char *data,
			   unsigned int len)
{
	int i, pos;
	char buf[100];

	pos = snprintf(buf, sizeof(buf), "%s: ", title);
	/* Cap at the buffer size so long messages can't overrun buf. */
	for (i = 0; i < len && pos < sizeof(buf); i++)
		pos += snprintf(buf + pos, sizeof(buf) - pos,
				" %2.2x", data[i]);
	pr_debug("%s\n", buf);
}
#else
static void ipmi_debug_msg(const char *title, unsigned char *data,
			   unsigned int len)
{ }
#endif

static bool initialized;
static bool drvregistered;

enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING
};
#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif
static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	char *s;

	strncpy(valcp, val, 15);
	valcp[15] = '\0';

	s = strstrip(valcp);

	if (strcmp(s, "none") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
	else if (strcmp(s, "event") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
	else if (strcmp(s, "string") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
	else
		return -EINVAL;

	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	switch (ipmi_send_panic_event) {
	case IPMI_SEND_PANIC_EVENT_NONE:
		strcpy(buffer, "none");
		break;

	case IPMI_SEND_PANIC_EVENT:
		strcpy(buffer, "event");
		break;

	case IPMI_SEND_PANIC_EVENT_STRING:
		strcpy(buffer, "string");
		break;

	default:
		strcpy(buffer, "???");
		break;
	}

	return strlen(buffer);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");


#define MAX_EVENTS_IN_QUEUE	25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at least
 * the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The default maximum number of times to retry a message before giving up");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
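/*
 * Note (illustrative arithmetic): with the default IPMI_TIMEOUT_TIME of
 * 1000 ms this evaluates to 1, i.e. queued events are requested on every
 * timeout tick.
 */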

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/*
	 * Set to NULL when the user is destroyed, a pointer to myself
	 * so srcu_dereference can be used on it.
	 */
	struct ipmi_user *self;
	struct srcu_struct release_barrier;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void             *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	/* Free must run in process context for RCU cleanup. */
	struct work_struct remove_work;
};

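/*
 * Take the SRCU read lock on a user and return it if it is still valid,
 * or NULL (with the read lock dropped again) if the user has already
 * been destroyed.  A non-NULL return must be paired with
 * release_ipmi_user().
 */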
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
{
	struct ipmi_user *ruser;

	*index = srcu_read_lock(&user->release_barrier);
	ruser = srcu_dereference(user->self, &user->release_barrier);
	if (!ruser)
		srcu_read_unlock(&user->release_barrier, *index);
	return ruser;
}

static void release_ipmi_user(struct ipmi_user *user, int index)
{
	srcu_read_unlock(&user->release_barrier, index);
}

struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int  chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int         inuse : 1;
	unsigned int         broadcast : 1;

	unsigned long        timeout;
	unsigned long        orig_timeout;
	unsigned int         retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long                 seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do {								\
		seq = (((msgid) >> 26) & 0x3f);				\
		seqid = ((msgid) & 0x3ffffff);				\
	} while (0)
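/*
 * Illustrative example: STORE_SEQ_IN_MSGID(5, 0x1234) puts the sequence
 * number in bits 26-31 and the seqid in bits 0-25, giving 0x14001234;
 * GET_SEQ_FROM_MSGID() on that value yields seq = 5 and seqid = 0x1234.
 */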

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)

#define IPMI_MAX_CHANNELS       16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head       intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id  id;
	struct ipmi_device_id  fetch_id;
	int                    dyn_id_set;
	unsigned long          dyn_id_expiry;
	struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t                 guid;
	guid_t                 fetch_guid;
	int                    dyn_guid_set;
	struct kref	       usecount;
	struct work_struct     remove_work;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out on the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent on the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};


#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.  seq_lock write
	 * protects this.  Read protection is with srcu.
	 */
	struct list_head users;
	struct srcu_struct users_srcu;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void                     *send_info;

	/* Driver-model device for the system interface. */
	struct device          *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t       waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t	 watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t             xmit_msgs_lock;
	struct list_head       xmit_msgs;
	struct ipmi_smi_msg    *curr_msg;
	struct list_head       hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex     cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t       events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */
	char             delivering_events;
	char             event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t         event_waiters;
	unsigned int     ticks_to_req_ev;

	spinlock_t       watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int     command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int     watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int     response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int     last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures. Used to decrease numbers of
	 * parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);


/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
static struct srcu_struct ipmi_interfaces_srcu;

/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void clean_up_interface_data(struct ipmi_smi *intf)
{
	int              i;
	struct cmd_rcvr  *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
					&& (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}

struct watcher_entry {
	int              intf_num;
	struct ipmi_smi  *intf;
	struct list_head link;
};

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	int index, rv;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		int intf_num = READ_ONCE(intf->intf_num);

764 765 766
		if (intf_num == -1)
			continue;
		watcher->new_smi(intf_num, intf->si_dev);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	mutex_unlock(&smi_watchers_mutex);

	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);

/*
 * Tell every registered watcher about an interface; this takes
 * smi_watchers_mutex itself, so it must not already be held.
 */
static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
	mutex_unlock(&smi_watchers_mutex);
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
		    = (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
			|| (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a deadlock
		 * risk.  At this moment, simply skip it in that case.
		 */
		ipmi_free_recv_msg(msg);
	} else {
		int index;
		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);

		if (user) {
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
			release_ipmi_user(user, index);
		} else {
			/* User went away, give up. */
			ipmi_free_recv_msg(msg);
			rv = -EINVAL;
		}
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}

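/*
 * smi_add_watch()/smi_remove_watch() keep per-type counts of who needs
 * the lower layer to watch for messages, watchdog pretimeouts, and
 * commands, and pass the resulting mask to set_need_watch() whenever
 * that mask changes.
 */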
static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
1014
static int intf_next_seq(struct ipmi_smi      *intf,
L
Linus Torvalds 已提交
1015 1016 1017 1018 1019 1020 1021 1022 1023 1024
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long        timeout,
			 int                  retries,
			 int                  broadcast,
			 unsigned char        *seq,
			 long                 *seqid)
{
	int          rv = 0;
	unsigned int i;

1025 1026 1027 1028 1029
	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

1030 1031
	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
					i = (i+1)%IPMI_IPMB_NUM_SEQ) {
1032
		if (!intf->seq_table[i].inuse)
L
Linus Torvalds 已提交
1033 1034 1035
			break;
	}

1036
	if (!intf->seq_table[i].inuse) {
L
Linus Torvalds 已提交
1037 1038
		intf->seq_table[i].recv_msg = recv_msg;

1039 1040 1041 1042
		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
L
Linus Torvalds 已提交
1043 1044 1045 1046 1047 1048 1049 1050 1051
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
1052
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1053
		need_waiter(intf);
L
Linus Torvalds 已提交
1054 1055 1056
	} else {
		rv = -EAGAIN;
	}
1057

L
Linus Torvalds 已提交
1058 1059 1060
	return rv;
}

1061 1062 1063 1064 1065 1066 1067
/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against message coming in after their timeout and the
 * sequence number being reused).
 */
1068
static int intf_find_seq(struct ipmi_smi      *intf,
L
Linus Torvalds 已提交
1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081
			 unsigned char        seq,
			 short                channel,
			 unsigned char        cmd,
			 unsigned char        netfn,
			 struct ipmi_addr     *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int           rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

1082
	spin_lock_irqsave(&intf->seq_lock, flags);
L
Linus Torvalds 已提交
1083 1084 1085
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

1086 1087
		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
				&& (msg->msg.netfn == netfn)
1088
				&& (ipmi_addr_equal(addr, &msg->addr))) {
L
Linus Torvalds 已提交
1089 1090
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
1091
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
L
Linus Torvalds 已提交
1092 1093 1094
			rv = 0;
		}
	}
1095
	spin_unlock_irqrestore(&intf->seq_lock, flags);
L
Linus Torvalds 已提交
1096 1097 1098 1099 1100 1101

	return rv;
}


/* Start the timer for a specific sequence table entry. */
1102
static int intf_start_seq_timer(struct ipmi_smi *intf,
L
Linus Torvalds 已提交
1103 1104 1105 1106 1107 1108 1109 1110 1111 1112
				long       msgid)
{
	int           rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

1113
	spin_lock_irqsave(&intf->seq_lock, flags);
1114 1115 1116 1117
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
L
Linus Torvalds 已提交
1118
	if ((intf->seq_table[seq].inuse)
1119
				&& (intf->seq_table[seq].seqid == seqid)) {
1120
		struct seq_table *ent = &intf->seq_table[seq];
L
Linus Torvalds 已提交
1121 1122 1123
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
1124
	spin_unlock_irqrestore(&intf->seq_lock, flags);
L
Linus Torvalds 已提交
1125 1126 1127 1128 1129

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
1130
static int intf_err_seq(struct ipmi_smi *intf,
L
Linus Torvalds 已提交
1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142
			long         msgid,
			unsigned int err)
{
	int                  rv = -ENODEV;
	unsigned long        flags;
	unsigned char        seq;
	unsigned long        seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

1143
	spin_lock_irqsave(&intf->seq_lock, flags);
1144 1145 1146 1147
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
L
Linus Torvalds 已提交
1148
	if ((intf->seq_table[seq].inuse)
1149
				&& (intf->seq_table[seq].seqid == seqid)) {
1150
		struct seq_table *ent = &intf->seq_table[seq];
L
Linus Torvalds 已提交
1151 1152

		ent->inuse = 0;
1153
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
L
Linus Torvalds 已提交
1154 1155 1156
		msg = ent->recv_msg;
		rv = 0;
	}
1157
	spin_unlock_irqrestore(&intf->seq_lock, flags);
L
Linus Torvalds 已提交
1158

1159
	if (msg)
C
Corey Minyard 已提交
1160
		deliver_err_response(intf, msg, err);
L
Linus Torvalds 已提交
1161 1162 1163 1164

	return rv;
}

1165 1166 1167 1168 1169 1170 1171 1172 1173
static void free_user_work(struct work_struct *work)
{
	struct ipmi_user *user = container_of(work, struct ipmi_user,
					      remove_work);

	cleanup_srcu_struct(&user->release_barrier);
	kfree(user);
}

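/*
 * Typical caller usage (an illustrative sketch only; my_recv and
 * my_hndl are hypothetical names):
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *data)
 *	{
 *		... examine msg ...
 *		ipmi_free_recv_msg(msg);
 *	}
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	struct ipmi_user *user;
 *	int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *	...
 *	ipmi_destroy_user(user);
 */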
int ipmi_create_user(unsigned int          if_num,
		     const struct ipmi_user_hndl *handler,
L
Linus Torvalds 已提交
1176
		     void                  *handler_data,
1177
		     struct ipmi_user      **user)
L
Linus Torvalds 已提交
1178 1179
{
	unsigned long flags;
1180
	struct ipmi_user *new_user;
1181
	int           rv, index;
1182
	struct ipmi_smi *intf;
L
Linus Torvalds 已提交
1183

1184 1185 1186 1187 1188 1189 1190
	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */
L
Linus Torvalds 已提交
1191 1192 1193 1194

	if (handler == NULL)
		return -EINVAL;

1195 1196 1197 1198
	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
1199 1200 1201
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;
L
Linus Torvalds 已提交
1202 1203

	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
1204
	if (!new_user)
L
Linus Torvalds 已提交
1205 1206
		return -ENOMEM;

1207
	index = srcu_read_lock(&ipmi_interfaces_srcu);
1208 1209 1210
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
L
Linus Torvalds 已提交
1211
	}
1212
	/* Not found, return an error */
1213 1214
	rv = -EINVAL;
	goto out_kfree;
L
Linus Torvalds 已提交
1215

1216
 found:
1217 1218
	INIT_WORK(&new_user->remove_work, free_user_work);

1219 1220 1221 1222
	rv = init_srcu_struct(&new_user->release_barrier);
	if (rv)
		goto out_kfree;

1223 1224
	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);
L
Linus Torvalds 已提交
1225

1226
	kref_init(&new_user->refcount);
L
Linus Torvalds 已提交
1227 1228 1229
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
1230
	new_user->gets_events = false;
L
Linus Torvalds 已提交
1231

1232
	rcu_assign_pointer(new_user->self, new_user);
1233 1234 1235
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
1236
	if (handler->ipmi_watchdog_pretimeout)
1237
		/* User wants pretimeouts, so make sure to watch for them. */
1238
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1239
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1240 1241
	*user = new_user;
	return 0;
L
Linus Torvalds 已提交
1242

1243
out_kfree:
1244
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1245
	kfree(new_user);
L
Linus Torvalds 已提交
1246 1247
	return rv;
}
1248
EXPORT_SYMBOL(ipmi_create_user);
L
Linus Torvalds 已提交
1249

1250 1251
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
1252
	int rv, index;
1253
	struct ipmi_smi *intf;
1254

1255
	index = srcu_read_lock(&ipmi_interfaces_srcu);
1256 1257 1258 1259
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
1260 1261
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

1262
	/* Not found, return an error */
1263
	return -EINVAL;
1264 1265

found:
1266 1267 1268 1269 1270
	if (!intf->handlers->get_smi_info)
		rv = -ENOTTY;
	else
		rv = intf->handlers->get_smi_info(intf->send_info, data);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1271 1272 1273 1274 1275

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

1276 1277
static void free_user(struct kref *ref)
{
1278
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1279 1280 1281

	/* SRCU cleanup must happen in task context. */
	schedule_work(&user->remove_work);
1282 1283
}

1284
static void _ipmi_destroy_user(struct ipmi_user *user)
L
Linus Torvalds 已提交
1285
{
1286
	struct ipmi_smi  *intf = user->intf;
L
Linus Torvalds 已提交
1287 1288
	int              i;
	unsigned long    flags;
1289 1290
	struct cmd_rcvr  *rcvr;
	struct cmd_rcvr  *rcvrs = NULL;
L
Linus Torvalds 已提交
1291

1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307
	if (!acquire_ipmi_user(user, &i)) {
		/*
		 * The user has already been cleaned up, just make sure
		 * nothing is using it and return.
		 */
		synchronize_srcu(&user->release_barrier);
		return;
	}

	rcu_assign_pointer(user->self, NULL);
	release_ipmi_user(user, i);

	synchronize_srcu(&user->release_barrier);

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);
L
Linus Torvalds 已提交
1308

1309
	if (user->handler->ipmi_watchdog_pretimeout)
1310
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1311 1312 1313 1314

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

1315 1316 1317
	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);
L
Linus Torvalds 已提交
1318

C
Corey Minyard 已提交
1319
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1320
		if (intf->seq_table[i].inuse
1321
		    && (intf->seq_table[i].recv_msg->user == user)) {
1322
			intf->seq_table[i].inuse = 0;
1323
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1324
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
L
Linus Torvalds 已提交
1325 1326
		}
	}
1327 1328 1329 1330 1331 1332
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
1335
	mutex_lock(&intf->cmd_rcvrs_mutex);
1336
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
L
Linus Torvalds 已提交
1337
		if (rcvr->user == user) {
1338 1339 1340
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
L
Linus Torvalds 已提交
1341 1342
		}
	}
1343
	mutex_unlock(&intf->cmd_rcvrs_mutex);
1344 1345 1346 1347 1348 1349
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}
L
Linus Torvalds 已提交
1350

1351
	kref_put(&intf->refcount, intf_free);
1352 1353 1354 1355 1356
}

int ipmi_destroy_user(struct ipmi_user *user)
{
	_ipmi_destroy_user(user);
L
Linus Torvalds 已提交
1357

1358
	kref_put(&user->refcount, free_user);
L
Linus Torvalds 已提交
1359

1360
	return 0;
L
Linus Torvalds 已提交
1361
}
1362
EXPORT_SYMBOL(ipmi_destroy_user);
L
Linus Torvalds 已提交
1363

1364
int ipmi_get_version(struct ipmi_user *user,
1365 1366
		     unsigned char *major,
		     unsigned char *minor)
L
Linus Torvalds 已提交
1367
{
1368
	struct ipmi_device_id id;
1369
	int rv, index;
1370

1371 1372 1373
	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;
1374

1375 1376 1377 1378 1379 1380
	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user, index);
1381

1382
	return rv;
L
Linus Torvalds 已提交
1383
}
1384
EXPORT_SYMBOL(ipmi_get_version);
L
Linus Torvalds 已提交
1385

1386
int ipmi_set_my_address(struct ipmi_user *user,
1387 1388
			unsigned int  channel,
			unsigned char address)
L
Linus Torvalds 已提交
1389
{
1390
	int index, rv = 0;
1391 1392 1393 1394 1395

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

1396
	if (channel >= IPMI_MAX_CHANNELS) {
1397
		rv = -EINVAL;
1398 1399
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1400
		user->intf->addrinfo[channel].address = address;
1401
	}
1402 1403
	release_ipmi_user(user, index);

1404
	return rv;
L
Linus Torvalds 已提交
1405
}
1406
EXPORT_SYMBOL(ipmi_set_my_address);
L
Linus Torvalds 已提交
1407

1408
int ipmi_get_my_address(struct ipmi_user *user,
1409 1410
			unsigned int  channel,
			unsigned char *address)
L
Linus Torvalds 已提交
1411
{
1412
	int index, rv = 0;
1413 1414 1415 1416 1417

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

1418
	if (channel >= IPMI_MAX_CHANNELS) {
1419
		rv = -EINVAL;
1420 1421
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1422
		*address = user->intf->addrinfo[channel].address;
1423
	}
1424 1425
	release_ipmi_user(user, index);

1426
	return rv;
L
Linus Torvalds 已提交
1427
}
1428
EXPORT_SYMBOL(ipmi_get_my_address);
L
Linus Torvalds 已提交
1429

1430
int ipmi_set_my_LUN(struct ipmi_user *user,
1431 1432
		    unsigned int  channel,
		    unsigned char LUN)
L
Linus Torvalds 已提交
1433
{
1434
	int index, rv = 0;
1435 1436 1437 1438 1439

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

1440
	if (channel >= IPMI_MAX_CHANNELS) {
1441
		rv = -EINVAL;
1442 1443
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1444
		user->intf->addrinfo[channel].lun = LUN & 0x3;
1445
	}
1446 1447
	release_ipmi_user(user, index);

1448
	return rv;
L
Linus Torvalds 已提交
1449
}
1450
EXPORT_SYMBOL(ipmi_set_my_LUN);
L
Linus Torvalds 已提交
1451

1452
int ipmi_get_my_LUN(struct ipmi_user *user,
1453 1454
		    unsigned int  channel,
		    unsigned char *address)
L
Linus Torvalds 已提交
1455
{
1456
	int index, rv = 0;
1457 1458 1459 1460 1461

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

1462
	if (channel >= IPMI_MAX_CHANNELS) {
1463
		rv = -EINVAL;
1464 1465
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1466
		*address = user->intf->addrinfo[channel].lun;
1467
	}
1468 1469
	release_ipmi_user(user, index);

1470
	return rv;
L
Linus Torvalds 已提交
1471
}
1472
EXPORT_SYMBOL(ipmi_get_my_LUN);
L
Linus Torvalds 已提交
1473

1474
int ipmi_get_maintenance_mode(struct ipmi_user *user)
C
Corey Minyard 已提交
1475
{
1476
	int mode, index;
C
Corey Minyard 已提交
1477 1478
	unsigned long flags;

1479 1480 1481 1482
	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

C
Corey Minyard 已提交
1483 1484 1485
	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1486
	release_ipmi_user(user, index);
C
Corey Minyard 已提交
1487 1488 1489 1490 1491

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

1492
static void maintenance_mode_update(struct ipmi_smi *intf)
C
Corey Minyard 已提交
1493 1494 1495 1496 1497 1498
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

1499
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
C
Corey Minyard 已提交
1500
{
1501
	int rv = 0, index;
C
Corey Minyard 已提交
1502
	unsigned long flags;
1503
	struct ipmi_smi *intf = user->intf;
C
Corey Minyard 已提交
1504

1505 1506 1507 1508
	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

C
Corey Minyard 已提交
1509 1510 1511 1512 1513 1514 1515 1516 1517
	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
C
Corey Minyard 已提交
1518
			intf->maintenance_mode_enable = false;
C
Corey Minyard 已提交
1519 1520 1521
			break;

		case IPMI_MAINTENANCE_MODE_ON:
C
Corey Minyard 已提交
1522
			intf->maintenance_mode_enable = true;
C
Corey Minyard 已提交
1523 1524 1525 1526 1527 1528
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
C
Corey Minyard 已提交
1529
		intf->maintenance_mode = mode;
C
Corey Minyard 已提交
1530 1531 1532 1533 1534

		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1535
	release_ipmi_user(user, index);
C
Corey Minyard 已提交
1536 1537 1538 1539 1540

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);

1541
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
L
Linus Torvalds 已提交
1542
{
1543
	unsigned long        flags;
1544
	struct ipmi_smi      *intf = user->intf;
1545 1546
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head     msgs;
1547 1548 1549 1550 1551
	int index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;
L
Linus Torvalds 已提交
1552

1553 1554 1555
	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
1556 1557 1558
	if (user->gets_events == val)
		goto out;

L
Linus Torvalds 已提交
1559 1560
	user->gets_events = val;

1561 1562 1563 1564 1565 1566 1567
	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

1568 1569 1570 1571 1572 1573 1574 1575 1576
	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
A
Akinobu Mita 已提交
1577 1578
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
1579
		intf->waiting_events_count = 0;
1580
		if (intf->event_msg_printed) {
1581
			dev_warn(intf->si_dev, "Event queue no longer full\n");
1582 1583
			intf->event_msg_printed = 0;
		}
1584

1585 1586 1587 1588 1589 1590
		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
C
Corey Minyard 已提交
1591
			deliver_local_response(intf, msg);
1592 1593 1594 1595
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
1596 1597
	}

1598
 out:
1599
	spin_unlock_irqrestore(&intf->events_lock, flags);
1600
	release_ipmi_user(user, index);
L
Linus Torvalds 已提交
1601 1602 1603

	return 0;
}
1604
EXPORT_SYMBOL(ipmi_set_gets_events);
L
Linus Torvalds 已提交
1605

1606
static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1607
				      unsigned char netfn,
1608 1609
				      unsigned char cmd,
				      unsigned char chan)
1610 1611 1612 1613
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1614 1615
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & (1 << chan)))
1616 1617 1618 1619 1620
			return rcvr;
	}
	return NULL;
}

1621
static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int  chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & chans))
			return 0;
	}
	return 1;
}

1636
int ipmi_register_for_cmd(struct ipmi_user *user,
L
Linus Torvalds 已提交
1637
			  unsigned char netfn,
1638 1639
			  unsigned char cmd,
			  unsigned int  chans)
L
Linus Torvalds 已提交
1640
{
1641
	struct ipmi_smi *intf = user->intf;
1642
	struct cmd_rcvr *rcvr;
1643
	int rv = 0, index;
L
Linus Torvalds 已提交
1644

1645 1646 1647
	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;
L
Linus Torvalds 已提交
1648 1649

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1650 1651 1652 1653
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
1654 1655
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
1656
	rcvr->chans = chans;
1657
	rcvr->user = user;
L
Linus Torvalds 已提交
1658

1659
	mutex_lock(&intf->cmd_rcvrs_mutex);
L
Linus Torvalds 已提交
1660
	/* Make sure the command/netfn is not already registered. */
1661
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1662 1663
		rv = -EBUSY;
		goto out_unlock;
L
Linus Torvalds 已提交
1664
	}
1665

1666
	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1667

1668
	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
L
Linus Torvalds 已提交
1669

1670
out_unlock:
1671
	mutex_unlock(&intf->cmd_rcvrs_mutex);
L
Linus Torvalds 已提交
1672 1673
	if (rv)
		kfree(rcvr);
1674
out_release:
1675
	release_ipmi_user(user, index);
L
Linus Torvalds 已提交
1676 1677 1678

	return rv;
}
1679
EXPORT_SYMBOL(ipmi_register_for_cmd);
L
Linus Torvalds 已提交
1680

1681
int ipmi_unregister_for_cmd(struct ipmi_user *user,
L
Linus Torvalds 已提交
1682
			    unsigned char netfn,
1683 1684
			    unsigned char cmd,
			    unsigned int  chans)
L
Linus Torvalds 已提交
1685
{
1686
	struct ipmi_smi *intf = user->intf;
1687
	struct cmd_rcvr *rcvr;
1688
	struct cmd_rcvr *rcvrs = NULL;
1689 1690 1691 1692 1693
	int i, rv = -ENOENT, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;
L
Linus Torvalds 已提交
1694

1695
	mutex_lock(&intf->cmd_rcvrs_mutex);
1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
1714
	release_ipmi_user(user, index);
1715
	while (rcvrs) {
1716
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1717 1718
		rcvr = rcvrs;
		rcvrs = rcvr->next;
1719
		kfree(rcvr);
L
Linus Torvalds 已提交
1720
	}
1721

1722
	return rv;
L
Linus Torvalds 已提交
1723
}
1724
EXPORT_SYMBOL(ipmi_unregister_for_cmd);
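/*
 * IPMB checksum: two's complement of the byte sum, so that adding all
 * bytes including the checksum gives 0 (mod 256).  For example, for the
 * two bytes 0x20 0x18 the checksum is 0xc8, since 0x20 + 0x18 + 0xc8
 * == 0x100.
 */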

static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}

static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long                  msgid,
				   unsigned char         ipmb_seq,
				   int                   broadcast,
				   unsigned char         source_address,
				   unsigned char         source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1756
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
L
Linus Torvalds 已提交
1757 1758 1759 1760 1761 1762
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
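
/*
 * Editor's note (illustrative, not in the original source): the
 * buffer built above is a Send Message request wrapping an IPMB
 * frame, laid out as:
 *
 *   data[0]  NetFn (App request) << 2
 *   data[1]  IPMI_SEND_MSG_CMD
 *   data[2]  target channel
 *   data[3]  0x00 only for broadcasts (i == 1)
 *   then: rsSA, netFn/rsLUN, checksum1, rqSA, rqSeq/rqLUN, cmd,
 *   message data, checksum2.
 */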

static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr  *lan_addr,
				  long                  msgid,
				  unsigned char         ipmb_seq,
				  unsigned char         source_lun)
{
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
C
Corey Minyard 已提交
1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925
			      struct ipmi_addr       *addr,
			      long                   msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg    *smi_msg,
			      struct ipmi_recv_msg   *recv_msg,
			      int                    retries,
			      unsigned int           retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}

static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
			   struct ipmi_addr       *addr,
			   long                   msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg    *smi_msg,
			   struct ipmi_recv_msg   *recv_msg,
			   unsigned char          source_address,
			   unsigned char          source_lun,
			   int                    retries,
			   unsigned int           retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
		/*
		 * Broadcasts add a zero at the beginning of the
		 * message, but otherwise is the same as an IPMB
		 * address.
		 */
		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		broadcast = 1;
		retries = 0; /* Don't retry broadcasts. */
	}

	/*
	 * 9 for the header and 1 for the checksum, plus
	 * possibly one for the broadcast.
	 */
	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
	if (ipmb_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_ipmb_responses);
		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
				msgid, broadcast,
				source_address, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		if (is_maintenance_mode_cmd(msg))
			intf->ipmb_maintenance_mode_timeout =
				maintenance_mode_timeout_ms;

		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
			/* Different default in maintenance mode */
			retry_time_ms = default_maintenance_retry_ms;

		/*
		 * Create a sequence number with a 1 second
		 * timeout and 4 retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   broadcast,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_ipmb_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_ipmb_msg(smi_msg, msg, ipmb_addr,
				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				ipmb_seq, broadcast,
				source_address, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}

static int i_ipmi_req_lan(struct ipmi_smi        *intf,
			  struct ipmi_addr       *addr,
			  long                   msgid,
			  struct kernel_ipmi_msg *msg,
			  struct ipmi_smi_msg    *smi_msg,
			  struct ipmi_recv_msg   *recv_msg,
			  unsigned char          source_lun,
			  int                    retries,
			  unsigned int           retry_time_ms)
{
	struct ipmi_lan_addr  *lan_addr;
	unsigned char ipmb_seq;
	long seqid;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if ((chans[addr->channel].medium
				!= IPMI_CHANNEL_MEDIUM_8023LAN)
			&& (chans[addr->channel].medium
			    != IPMI_CHANNEL_MEDIUM_ASYNC)) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	/* 11 for the header and 1 for the checksum. */
	if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	lan_addr = (struct ipmi_lan_addr *) addr;
	if (lan_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_lan_responses);
		format_lan_msg(smi_msg, msg, lan_addr, msgid,
			       msgid, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		/*
		 * Create a sequence number with a 1 second
		 * timeout and 4 retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   0,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_lan_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_lan_msg(smi_msg, msg, lan_addr,
			       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
			       ipmb_seq, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}

/*
 * Separate from ipmi_request so that the user does not have to be
 * supplied in certain circumstances (mainly at panic time).  If
 * messages are supplied, they will be freed, even if an error
 * occurs.
 */
static int i_ipmi_request(struct ipmi_user     *user,
			  struct ipmi_smi      *intf,
			  struct ipmi_addr     *addr,
			  long                 msgid,
			  struct kernel_ipmi_msg *msg,
			  void                 *user_msg_data,
			  void                 *supplied_smi,
			  struct ipmi_recv_msg *supplied_recv,
			  int                  priority,
			  unsigned char        source_address,
			  unsigned char        source_lun,
			  int                  retries,
			  unsigned int         retry_time_ms)
{
	struct ipmi_smi_msg *smi_msg;
	struct ipmi_recv_msg *recv_msg;
	int rv = 0;

	if (supplied_recv)
		recv_msg = supplied_recv;
	else {
		recv_msg = ipmi_alloc_recv_msg();
		if (recv_msg == NULL) {
			rv = -ENOMEM;
			goto out;
		}
	}
	recv_msg->user_msg_data = user_msg_data;

	if (supplied_smi)
		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
	else {
		smi_msg = ipmi_alloc_smi_msg();
		if (smi_msg == NULL) {
			if (!supplied_recv)
				ipmi_free_recv_msg(recv_msg);
			rv = -ENOMEM;
			goto out;
		}
	}

	rcu_read_lock();
	if (intf->in_shutdown) {
		rv = -ENODEV;
		goto out_err;
	}

	recv_msg->user = user;
	if (user)
		/* The put happens when the message is freed. */
		kref_get(&user->refcount);
	recv_msg->msgid = msgid;
	/*
	 * Store the message to send in the receive message so timeout
	 * responses can get the proper response data.
	 */
	recv_msg->msg = *msg;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
					recv_msg, retries, retry_time_ms);
	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
				     source_address, source_lun,
				     retries, retry_time_ms);
	} else if (is_lan_addr(addr)) {
		rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
				    source_lun, retries, retry_time_ms);
	} else {
	    /* Unknown address type. */
		ipmi_inc_stat(intf, sent_invalid_commands);
		rv = -EINVAL;
	}

	if (rv) {
out_err:
		ipmi_free_smi_msg(smi_msg);
		ipmi_free_recv_msg(recv_msg);
	} else {
		ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size);

		smi_send(intf, intf->handlers, smi_msg, priority);
	}
	rcu_read_unlock();

out:
	return rv;
}

static int check_addr(struct ipmi_smi  *intf,
		      struct ipmi_addr *addr,
		      unsigned char    *saddr,
		      unsigned char    *lun)
{
	if (addr->channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
	*lun = intf->addrinfo[addr->channel].lun;
	*saddr = intf->addrinfo[addr->channel].address;
	return 0;
}
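
/*
 * Editor's note (not in the original source): addr->channel comes
 * from the caller/user, so array_index_nospec() above clamps the
 * index under speculative execution (Spectre-v1 hardening) before it
 * is used to index intf->addrinfo[].
 */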

int ipmi_request_settime(struct ipmi_user *user,
			 struct ipmi_addr *addr,
			 long             msgid,
			 struct kernel_ipmi_msg  *msg,
			 void             *user_msg_data,
			 int              priority,
			 int              retries,
			 unsigned int     retry_time_ms)
{
	unsigned char saddr = 0, lun = 0;
	int rv, index;

	if (!user)
		return -EINVAL;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (!rv)
		rv = i_ipmi_request(user,
				    user->intf,
				    addr,
				    msgid,
				    msg,
				    user_msg_data,
				    NULL, NULL,
				    priority,
				    saddr,
				    lun,
				    retries,
				    retry_time_ms);

	release_ipmi_user(user, index);
	return rv;
}
EXPORT_SYMBOL(ipmi_request_settime);
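
/*
 * Editor's note (illustrative sketch, not in the original source): a
 * kernel client that already holds a struct ipmi_user from
 * ipmi_create_user() could send a Get Device ID request to its local
 * BMC roughly like this (error handling omitted; "user" and "my_data"
 * are hypothetical):
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *	struct kernel_ipmi_msg msg = {
 *		.netfn    = IPMI_NETFN_APP_REQUEST,
 *		.cmd      = IPMI_GET_DEVICE_ID_CMD,
 *		.data     = NULL,
 *		.data_len = 0,
 *	};
 *
 *	rv = ipmi_request_settime(user, (struct ipmi_addr *) &si, 0,
 *				  &msg, my_data, 0, -1, 0);
 *
 * Passing -1 retries and 0 retry_time_ms asks for the defaults, the
 * same convention the in-file callers of i_ipmi_request() use.
 */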

int ipmi_request_supply_msgs(struct ipmi_user     *user,
			     struct ipmi_addr     *addr,
			     long                 msgid,
			     struct kernel_ipmi_msg *msg,
			     void                 *user_msg_data,
			     void                 *supplied_smi,
			     struct ipmi_recv_msg *supplied_recv,
			     int                  priority)
{
	unsigned char saddr = 0, lun = 0;
	int rv, index;

	if (!user)
		return -EINVAL;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (!rv)
		rv = i_ipmi_request(user,
				    user->intf,
				    addr,
				    msgid,
				    msg,
				    user_msg_data,
				    supplied_smi,
				    supplied_recv,
				    priority,
				    saddr,
				    lun,
				    -1, 0);

	release_ipmi_user(user, index);
	return rv;
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);

static void bmc_device_id_handler(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg)
{
	int rv;

	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
			|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
			|| (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
		dev_warn(intf->si_dev,
			 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
			 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
		return;
	}

	rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
			msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
	if (rv) {
		dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
		intf->bmc->dyn_id_set = 0;
	} else {
		/*
		 * Make sure the id data is available before setting
		 * dyn_id_set.
		 */
		smp_wmb();
		intf->bmc->dyn_id_set = 1;
	}

	wake_up(&intf->waitq);
}
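
/*
 * Editor's note (not in the original source): bmc->dyn_id_set acts as
 * a small state machine here: 2 means a fetch is in flight, 1 means
 * fetch_id is valid, 0 means the fetch failed.  The smp_wmb() above
 * pairs with the smp_rmb() in __get_device_id() so the ID data is
 * visible before dyn_id_set is observed as 1.
 */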

static int
send_get_device_id_cmd(struct ipmi_smi *intf)
{
	struct ipmi_system_interface_addr si;
	struct kernel_ipmi_msg msg;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;

	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}

static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
{
	int rv;

	bmc->dyn_id_set = 2;

	intf->null_user_handler = bmc_device_id_handler;

	rv = send_get_device_id_cmd(intf);
	if (rv)
		return rv;

	wait_event(intf->waitq, bmc->dyn_id_set != 2);

	if (!bmc->dyn_id_set)
		rv = -EIO; /* Something went wrong in the fetch. */

	/* dyn_id_set makes the id data available. */
	smp_rmb();

	intf->null_user_handler = NULL;

	return rv;
}

/*
 * Fetch the device id for the bmc/interface.  You must pass in either
 * bmc or intf, this code will get the other one.  If the data has
 * been recently fetched, this will just use the cached data.  Otherwise
 * it will run a new fetch.
 *
 * Except for the first time this is called (in ipmi_register_smi()),
 * this will always return good data.
 */
static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			       struct ipmi_device_id *id,
			       bool *guid_set, guid_t *guid, int intf_num)
{
	int rv = 0;
	int prev_dyn_id_set, prev_guid_set;
	bool intf_set = intf != NULL;

	if (!intf) {
		mutex_lock(&bmc->dyn_mutex);
retry_bmc_lock:
		if (list_empty(&bmc->intfs)) {
			mutex_unlock(&bmc->dyn_mutex);
			return -ENOENT;
		}
		intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
					bmc_link);
		kref_get(&intf->refcount);
		mutex_unlock(&bmc->dyn_mutex);
		mutex_lock(&intf->bmc_reg_mutex);
		mutex_lock(&bmc->dyn_mutex);
		if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
					     bmc_link)) {
			mutex_unlock(&intf->bmc_reg_mutex);
			kref_put(&intf->refcount, intf_free);
			goto retry_bmc_lock;
		}
	} else {
		mutex_lock(&intf->bmc_reg_mutex);
		bmc = intf->bmc;
		mutex_lock(&bmc->dyn_mutex);
		kref_get(&intf->refcount);
	}

	/* If we have a valid and current ID, just return that. */
	if (intf->in_bmc_register ||
	    (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
		goto out_noprocessing;

	prev_guid_set = bmc->dyn_guid_set;
	__get_guid(intf);

	prev_dyn_id_set = bmc->dyn_id_set;
	rv = __get_device_id(intf, bmc);
	if (rv)
		goto out;

	/*
	 * The guid, device id, manufacturer id, and product id should
	 * not change on a BMC.  If it does we have to do some dancing.
	 */
	if (!intf->bmc_registered
	    || (!prev_guid_set && bmc->dyn_guid_set)
	    || (!prev_dyn_id_set && bmc->dyn_id_set)
	    || (prev_guid_set && bmc->dyn_guid_set
		&& !guid_equal(&bmc->guid, &bmc->fetch_guid))
	    || bmc->id.device_id != bmc->fetch_id.device_id
	    || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
	    || bmc->id.product_id != bmc->fetch_id.product_id) {
		struct ipmi_device_id id = bmc->fetch_id;
		int guid_set = bmc->dyn_guid_set;
		guid_t guid;

		guid = bmc->fetch_guid;
		mutex_unlock(&bmc->dyn_mutex);

		__ipmi_bmc_unregister(intf);
		/* Fill in the temporary BMC for good measure. */
		intf->bmc->id = id;
		intf->bmc->dyn_guid_set = guid_set;
		intf->bmc->guid = guid;
		if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
			need_waiter(intf); /* Retry later on an error. */
		else
			__scan_channels(intf, &id);


		if (!intf_set) {
			/*
			 * We weren't given the interface on the
			 * command line, so restart the operation on
			 * the next interface for the BMC.
			 */
			mutex_unlock(&intf->bmc_reg_mutex);
			mutex_lock(&bmc->dyn_mutex);
			goto retry_bmc_lock;
		}

		/* We have a new BMC, set it up. */
		bmc = intf->bmc;
		mutex_lock(&bmc->dyn_mutex);
		goto out_noprocessing;
	} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
		/* Version info changes, scan the channels again. */
		__scan_channels(intf, &bmc->fetch_id);

	bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;

out:
	if (rv && prev_dyn_id_set) {
		rv = 0; /* Ignore failures if we have previous data. */
		bmc->dyn_id_set = prev_dyn_id_set;
	}
	if (!rv) {
		bmc->id = bmc->fetch_id;
		if (bmc->dyn_guid_set)
			bmc->guid = bmc->fetch_guid;
		else if (prev_guid_set)
			/*
			 * The guid used to be valid and it failed to fetch,
			 * just use the cached value.
			 */
			bmc->dyn_guid_set = prev_guid_set;
	}
out_noprocessing:
	if (!rv) {
		if (id)
			*id = bmc->id;

		if (guid_set)
			*guid_set = bmc->dyn_guid_set;

		if (guid && bmc->dyn_guid_set)
			*guid =  bmc->guid;
	}

	mutex_unlock(&bmc->dyn_mutex);
	mutex_unlock(&intf->bmc_reg_mutex);

	kref_put(&intf->refcount, intf_free);
	return rv;
}

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid)
{
	return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
}

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 10, "%u\n", id.device_id);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t provides_device_sdrs_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
}
static DEVICE_ATTR_RO(provides_device_sdrs);

static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
}
static DEVICE_ATTR_RO(revision);

static ssize_t firmware_revision_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
			id.firmware_revision_2);
}
static DEVICE_ATTR_RO(firmware_revision);

static ssize_t ipmi_version_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 20, "%u.%u\n",
			ipmi_version_major(&id),
			ipmi_version_minor(&id));
}
static DEVICE_ATTR_RO(ipmi_version);

static ssize_t add_dev_support_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
}
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
		   NULL);

static ssize_t manufacturer_id_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
}
static DEVICE_ATTR_RO(manufacturer_id);

static ssize_t product_id_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
}
static DEVICE_ATTR_RO(product_id);

static ssize_t aux_firmware_rev_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
			id.aux_firmware_revision[3],
			id.aux_firmware_revision[2],
			id.aux_firmware_revision[1],
			id.aux_firmware_revision[0]);
}
static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);

static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	bool guid_set;
	guid_t guid;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
	if (rv)
		return rv;
	if (!guid_set)
		return -ENOENT;

	return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
}
static DEVICE_ATTR_RO(guid);

static struct attribute *bmc_dev_attrs[] = {
	&dev_attr_device_id.attr,
	&dev_attr_provides_device_sdrs.attr,
	&dev_attr_revision.attr,
	&dev_attr_firmware_revision.attr,
	&dev_attr_ipmi_version.attr,
	&dev_attr_additional_device_support.attr,
	&dev_attr_manufacturer_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_aux_firmware_revision.attr,
	&dev_attr_guid.attr,
	NULL
};
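
/*
 * Editor's note (not in the original source): these attributes appear
 * as read-only sysfs files (device_id, provides_device_sdrs, revision,
 * firmware_revision, ipmi_version, additional_device_support,
 * manufacturer_id, product_id, aux_firmware_revision, guid) under the
 * ipmi_bmc platform device; the last two are hidden by the is_visible
 * callback below when the BMC did not provide them.
 */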

static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct bmc_device *bmc = to_bmc_device(dev);
	umode_t mode = attr->mode;
	int rv;

	if (attr == &dev_attr_aux_firmware_revision.attr) {
		struct ipmi_device_id id;

		rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
		return (!rv && id.aux_firmware_revision_set) ? mode : 0;
	}
	if (attr == &dev_attr_guid.attr) {
		bool guid_set;

		rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
		return (!rv && guid_set) ? mode : 0;
	}
	return mode;
}

static const struct attribute_group bmc_dev_attr_group = {
	.attrs		= bmc_dev_attrs,
	.is_visible	= bmc_dev_attr_is_visible,
};

static const struct attribute_group *bmc_dev_attr_groups[] = {
	&bmc_dev_attr_group,
	NULL
};

static const struct device_type bmc_device_type = {
	.groups		= bmc_dev_attr_groups,
};

static int __find_bmc_guid(struct device *dev, const void *data)
{
	const guid_t *guid = data;
	struct bmc_device *bmc;
	int rv;

	if (dev->type != &bmc_device_type)
		return 0;

	bmc = to_bmc_device(dev);
	rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
	if (rv)
		rv = kref_get_unless_zero(&bmc->usecount);
	return rv;
}

/*
 * Returns with the bmc's usecount incremented, if it is non-NULL.
 */
static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
					     guid_t *guid)
{
	struct device *dev;
	struct bmc_device *bmc = NULL;

	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
	if (dev) {
		bmc = to_bmc_device(dev);
		put_device(dev);
	}
	return bmc;
}

struct prod_dev_id {
	unsigned int  product_id;
	unsigned char device_id;
};

static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
{
	const struct prod_dev_id *cid = data;
	struct bmc_device *bmc;
	int rv;

	if (dev->type != &bmc_device_type)
		return 0;

	bmc = to_bmc_device(dev);
	rv = (bmc->id.product_id == cid->product_id
	      && bmc->id.device_id == cid->device_id);
	if (rv)
		rv = kref_get_unless_zero(&bmc->usecount);
	return rv;
}

/*
 * Returns with the bmc's usecount incremented, if it is non-NULL.
 */
static struct bmc_device *ipmi_find_bmc_prod_dev_id(
	struct device_driver *drv,
	unsigned int product_id, unsigned char device_id)
{
	struct prod_dev_id id = {
		.product_id = product_id,
		.device_id = device_id,
	};
	struct device *dev;
	struct bmc_device *bmc = NULL;

	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
	if (dev) {
		bmc = to_bmc_device(dev);
		put_device(dev);
	}
	return bmc;
}

static DEFINE_IDA(ipmi_bmc_ida);

static void
release_bmc_device(struct device *dev)
{
	kfree(to_bmc_device(dev));
}

static void cleanup_bmc_work(struct work_struct *work)
{
	struct bmc_device *bmc = container_of(work, struct bmc_device,
					      remove_work);
	int id = bmc->pdev.id; /* Unregister overwrites id */

	platform_device_unregister(&bmc->pdev);
	ida_simple_remove(&ipmi_bmc_ida, id);
}

static void
cleanup_bmc_device(struct kref *ref)
{
	struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);

	/*
	 * Remove the platform device in a work queue to avoid issues
	 * with removing the device attributes while reading a device
	 * attribute.
	 */
	schedule_work(&bmc->remove_work);
}

/*
 * Must be called with intf->bmc_reg_mutex held.
 */
static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	struct bmc_device *bmc = intf->bmc;

	if (!intf->bmc_registered)
		return;

	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
	sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
	kfree(intf->my_dev_name);
	intf->my_dev_name = NULL;

	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	kref_put(&bmc->usecount, cleanup_bmc_device);
	intf->bmc_registered = false;
}

static void ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	mutex_lock(&intf->bmc_reg_mutex);
	__ipmi_bmc_unregister(intf);
	mutex_unlock(&intf->bmc_reg_mutex);
}

/*
 * Must be called with intf->bmc_reg_mutex held.
 */
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num)
{
	int               rv;
	struct bmc_device *bmc;
	struct bmc_device *old_bmc;

	/*
	 * platform_device_register() can cause bmc_reg_mutex to
	 * be claimed because of the is_visible functions of
	 * the attributes.  Eliminate possible recursion and
	 * release the lock.
	 */
	intf->in_bmc_register = true;
	mutex_unlock(&intf->bmc_reg_mutex);

	/*
	 * Try to find if there is a bmc_device struct
	 * representing the interfaced BMC already
	 */
	mutex_lock(&ipmidriver_mutex);
	if (guid_set)
		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
	else
		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
						    id->product_id,
						    id->device_id);

	/*
	 * If there is already an bmc_device, free the new one,
	 * otherwise register the new BMC device
	 */
	if (old_bmc) {
		bmc = old_bmc;
		/*
		 * Note: old_bmc already has usecount incremented by
		 * the BMC find functions.
		 */
		intf->bmc = old_bmc;
		mutex_lock(&bmc->dyn_mutex);
		list_add_tail(&intf->bmc_link, &bmc->intfs);
		mutex_unlock(&bmc->dyn_mutex);

		dev_info(intf->si_dev,
			 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
			 bmc->id.manufacturer_id,
			 bmc->id.product_id,
			 bmc->id.device_id);
	} else {
		bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
		if (!bmc) {
			rv = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(&bmc->intfs);
		mutex_init(&bmc->dyn_mutex);
		INIT_WORK(&bmc->remove_work, cleanup_bmc_work);

		bmc->id = *id;
		bmc->dyn_id_set = 1;
		bmc->dyn_guid_set = guid_set;
		bmc->guid = *guid;
		bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;

		bmc->pdev.name = "ipmi_bmc";

		rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
		if (rv < 0)
			goto out;
		bmc->pdev.dev.driver = &ipmidriver.driver;
		bmc->pdev.id = rv;
		bmc->pdev.dev.release = release_bmc_device;
		bmc->pdev.dev.type = &bmc_device_type;
		kref_init(&bmc->usecount);

		intf->bmc = bmc;
		mutex_lock(&bmc->dyn_mutex);
		list_add_tail(&intf->bmc_link, &bmc->intfs);
		mutex_unlock(&bmc->dyn_mutex);

		rv = platform_device_register(&bmc->pdev);
		if (rv) {
			dev_err(intf->si_dev,
				"Unable to register bmc device: %d\n",
				rv);
			goto out_list_del;
		}

		dev_info(intf->si_dev,
			 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
			 bmc->id.manufacturer_id,
			 bmc->id.product_id,
			 bmc->id.device_id);
	}

	/*
	 * create symlink from system interface device to bmc device
	 * and back.
	 */
	rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
	if (rv) {
		dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
		goto out_put_bmc;
	}

	if (intf_num == -1)
		intf_num = intf->intf_num;
	intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
	if (!intf->my_dev_name) {
		rv = -ENOMEM;
		dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
			rv);
		goto out_unlink1;
	}

	rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
			       intf->my_dev_name);
	if (rv) {
		kfree(intf->my_dev_name);
		intf->my_dev_name = NULL;
		dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
			rv);
		goto out_free_my_dev_name;
	}

	intf->bmc_registered = true;

out:
	mutex_unlock(&ipmidriver_mutex);
	mutex_lock(&intf->bmc_reg_mutex);
	intf->in_bmc_register = false;
	return rv;


out_free_my_dev_name:
	kfree(intf->my_dev_name);
	intf->my_dev_name = NULL;

out_unlink1:
	sysfs_remove_link(&intf->si_dev->kobj, "bmc");

out_put_bmc:
	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	kref_put(&bmc->usecount, cleanup_bmc_device);
	goto out;

out_list_del:
	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	put_device(&bmc->pdev.dev);
	goto out;
}

static int
send_guid_cmd(struct ipmi_smi *intf, int chan)
{
	struct kernel_ipmi_msg            msg;
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}

static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	struct bmc_device *bmc = intf->bmc;

	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
		/* Not for me */
		return;

	if (msg->msg.data[0] != 0) {
		/* Error from getting the GUID, the BMC doesn't have one. */
		bmc->dyn_guid_set = 0;
		goto out;
	}

	if (msg->msg.data_len < UUID_SIZE + 1) {
		bmc->dyn_guid_set = 0;
		dev_warn(intf->si_dev,
			 "The GUID response from the BMC was too short, it was %d but should have been %d.  Assuming GUID is not available.\n",
			 msg->msg.data_len, UUID_SIZE + 1);
		goto out;
	}

	guid_copy(&bmc->fetch_guid, (guid_t *)(msg->msg.data + 1));
	/*
	 * Make sure the guid data is available before setting
	 * dyn_guid_set.
	 */
	smp_wmb();
	bmc->dyn_guid_set = 1;
 out:
	wake_up(&intf->waitq);
}
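
/*
 * Editor's note (not in the original source): a Get Device GUID
 * response carries the completion code in data[0] followed by the
 * 16-byte GUID, which is why the handler above requires data_len to
 * be at least UUID_SIZE + 1 before copying from data + 1.
 */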

static void __get_guid(struct ipmi_smi *intf)
{
	int rv;
	struct bmc_device *bmc = intf->bmc;

	bmc->dyn_guid_set = 2;
	intf->null_user_handler = guid_handler;
	rv = send_guid_cmd(intf, 0);
	if (rv)
		/* Send failed, no GUID available. */
		bmc->dyn_guid_set = 0;

	wait_event(intf->waitq, bmc->dyn_guid_set != 2);

	/* dyn_guid_set makes the guid data available. */
	smp_rmb();

	intf->null_user_handler = NULL;
}

static int
send_channel_info_cmd(struct ipmi_smi *intf, int chan)
{
	struct kernel_ipmi_msg            msg;
	unsigned char                     data[1];
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
	msg.data = data;
	msg.data_len = 1;
	data[0] = chan;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}

static void
channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;
	int ch;
	unsigned int set = intf->curr_working_cset;
	struct ipmi_channel *chans;

	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
		/* It's the one we want */
		if (msg->msg.data[0] != 0) {
			/* Got an error from the channel, just go on. */

			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
				/*
				 * If the MC does not support this
				 * command, that is legal.  We just
				 * assume it has one IPMB at channel
				 * zero.
				 */
				intf->wchannels[set].c[0].medium
					= IPMI_CHANNEL_MEDIUM_IPMB;
				intf->wchannels[set].c[0].protocol
					= IPMI_CHANNEL_PROTOCOL_IPMB;

				intf->channel_list = intf->wchannels + set;
				intf->channels_ready = true;
				wake_up(&intf->waitq);
				goto out;
			}
			goto next_channel;
		}
		if (msg->msg.data_len < 4) {
			/* Message not big enough, just go on. */
			goto next_channel;
		}
		ch = intf->curr_channel;
		chans = intf->wchannels[set].c;
		chans[ch].medium = msg->msg.data[2] & 0x7f;
		chans[ch].protocol = msg->msg.data[3] & 0x1f;

 next_channel:
		intf->curr_channel++;
		if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			wake_up(&intf->waitq);
		} else {
			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			rv = send_channel_info_cmd(intf, intf->curr_channel);
		}

		if (rv) {
			/* Got an error somehow, just give up. */
			dev_warn(intf->si_dev,
				 "Error sending channel information for channel %d: %d\n",
				 intf->curr_channel, rv);

			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
		}
	}
 out:
	return;
}

3308 3309 3310
/*
 * Must be holding intf->bmc_reg_mutex to call this.
 */
3311
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354
{
	int rv;

	if (ipmi_version_major(id) > 1
			|| (ipmi_version_major(id) == 1
			    && ipmi_version_minor(id) >= 5)) {
		unsigned int set;

		/*
		 * Start scanning the channels to see what is
		 * available.
		 */
		set = !intf->curr_working_cset;
		intf->curr_working_cset = set;
		memset(&intf->wchannels[set], 0,
		       sizeof(struct ipmi_channel_set));

		intf->null_user_handler = channel_handler;
		intf->curr_channel = 0;
		rv = send_channel_info_cmd(intf, 0);
		if (rv) {
			dev_warn(intf->si_dev,
				 "Error sending channel information for channel 0, %d\n",
				 rv);
			return -EIO;
		}

		/* Wait for the channel info to be read. */
		wait_event(intf->waitq, intf->channels_ready);
		intf->null_user_handler = NULL;
	} else {
		unsigned int set = intf->curr_working_cset;

		/* Assume a single IPMB channel at zero. */
		intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
		intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
		intf->channel_list = intf->wchannels + set;
		intf->channels_ready = true;
	}

	return 0;
}

static void ipmi_poll(struct ipmi_smi *intf)
{
	if (intf->handlers->poll)
		intf->handlers->poll(intf->send_info);
	/* In case something came in */
	handle_new_recv_msgs(intf);
}

void ipmi_poll_interface(struct ipmi_user *user)
{
	ipmi_poll(user->intf);
}
EXPORT_SYMBOL(ipmi_poll_interface);

static void redo_bmc_reg(struct work_struct *work)
{
	struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
					     bmc_reg_work);

	if (!intf->in_shutdown)
		bmc_get_device_id(intf, NULL, NULL, NULL, NULL);

	kref_put(&intf->refcount, intf_free);
}

int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
		      void		       *send_info,
		      struct device            *si_dev,
		      unsigned char            slave_addr)
{
	int              i, j;
	int              rv;
	struct ipmi_smi *intf, *tintf;
	struct list_head *link;
	struct ipmi_device_id id;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return -ENOMEM;

	rv = init_srcu_struct(&intf->users_srcu);
	if (rv) {
		kfree(intf);
		return rv;
	}


	intf->bmc = &intf->tmp_bmc;
	INIT_LIST_HEAD(&intf->bmc->intfs);
	mutex_init(&intf->bmc->dyn_mutex);
	INIT_LIST_HEAD(&intf->bmc_link);
	mutex_init(&intf->bmc_reg_mutex);
	intf->intf_num = -1; /* Mark it invalid for now. */
	kref_init(&intf->refcount);
	INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
	intf->si_dev = si_dev;
	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
		intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
		intf->addrinfo[j].lun = 2;
	}
	if (slave_addr != 0)
		intf->addrinfo[0].address = slave_addr;
	INIT_LIST_HEAD(&intf->users);
	intf->handlers = handlers;
	intf->send_info = send_info;
	spin_lock_init(&intf->seq_lock);
	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
		intf->seq_table[j].inuse = 0;
		intf->seq_table[j].seqid = 0;
	}
	intf->curr_seq = 0;
	spin_lock_init(&intf->waiting_rcv_msgs_lock);
	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
	tasklet_init(&intf->recv_tasklet,
		     smi_recv_tasklet,
		     (unsigned long) intf);
	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
	spin_lock_init(&intf->xmit_msgs_lock);
	INIT_LIST_HEAD(&intf->xmit_msgs);
	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
	spin_lock_init(&intf->events_lock);
	spin_lock_init(&intf->watch_lock);
	atomic_set(&intf->event_waiters, 0);
	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
	INIT_LIST_HEAD(&intf->waiting_events);
	intf->waiting_events_count = 0;
	mutex_init(&intf->cmd_rcvrs_mutex);
	spin_lock_init(&intf->maintenance_mode_lock);
	INIT_LIST_HEAD(&intf->cmd_rcvrs);
	init_waitqueue_head(&intf->waitq);
	for (i = 0; i < IPMI_NUM_STATS; i++)
		atomic_set(&intf->stats[i], 0);

	mutex_lock(&ipmi_interfaces_mutex);
	/* Look for a hole in the numbers. */
	i = 0;
	link = &ipmi_interfaces;
	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
		if (tintf->intf_num != i) {
			link = &tintf->link;
			break;
		}
		i++;
	}
	/* Add the new interface in numeric order. */
	if (i == 0)
		list_add_rcu(&intf->link, &ipmi_interfaces);
	else
		list_add_tail_rcu(&intf->link, link);

	rv = handlers->start_processing(send_info, intf);
	if (rv)
		goto out_err;

	rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
	if (rv) {
		dev_err(si_dev, "Unable to get the device id: %d\n", rv);
		goto out_err_started;
	}

	mutex_lock(&intf->bmc_reg_mutex);
	rv = __scan_channels(intf, &id);
	mutex_unlock(&intf->bmc_reg_mutex);
	if (rv)
		goto out_err_bmc_reg;

	/*
	 * Keep memory order straight for RCU readers.  Make
	 * sure everything else is committed to memory before
	 * setting intf_num to mark the interface valid.
	 */
	smp_wmb();
	intf->intf_num = i;
	mutex_unlock(&ipmi_interfaces_mutex);

	/* After this point the interface is legal to use. */
	call_smi_watchers(i, intf->si_dev);

	return 0;

 out_err_bmc_reg:
	ipmi_bmc_unregister(intf);
 out_err_started:
	if (intf->handlers->shutdown)
		intf->handlers->shutdown(intf->send_info);
 out_err:
	list_del_rcu(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	synchronize_srcu(&ipmi_interfaces_srcu);
	cleanup_srcu_struct(&intf->users_srcu);
	kref_put(&intf->refcount, intf_free);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_smi);
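
/*
 * Editor's note (not in the original source): registration proceeds
 * in this order: allocate and initialize the interface, link it into
 * ipmi_interfaces, call the handlers' start_processing(), fetch the
 * device ID, scan the channels, then publish intf_num and notify the
 * SMI watchers.  The out_err* labels above unwind those steps in
 * reverse when any of them fails.
 */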

static void deliver_smi_err_response(struct ipmi_smi *intf,
				     struct ipmi_smi_msg *msg,
				     unsigned char err)
{
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = err;
	msg->rsp_size = 3;
	/* It's an error, so it will never requeue, no need to check return. */
	handle_one_recv_msg(intf, msg);
}

static void cleanup_smi_msgs(struct ipmi_smi *intf)
{
	int              i;
	struct seq_table *ent;
	struct ipmi_smi_msg *msg;
	struct list_head *entry;
	struct list_head tmplist;

	/* Clear out our transmit queues and hold the messages. */
	INIT_LIST_HEAD(&tmplist);
	list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
	list_splice_tail(&intf->xmit_msgs, &tmplist);

	/* Current message first, to preserve order */
	while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
		/* Wait for the message to clear out. */
		schedule_timeout(1);
	}

	/* No need for locks, the interface is down. */

	/*
	 * Return errors for all pending messages in queue and in the
	 * tables waiting for remote responses.
	 */
	while (!list_empty(&tmplist)) {
		entry = tmplist.next;
		list_del(entry);
		msg = list_entry(entry, struct ipmi_smi_msg, link);
		deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
	}

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		ent = &intf->seq_table[i];
		if (!ent->inuse)
			continue;
		deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
	}
}

void ipmi_unregister_smi(struct ipmi_smi *intf)
{
	struct ipmi_smi_watcher *w;
	int intf_num = intf->intf_num, index;

	mutex_lock(&ipmi_interfaces_mutex);
	intf->intf_num = -1;
	intf->in_shutdown = true;
	list_del_rcu(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	synchronize_srcu(&ipmi_interfaces_srcu);

	/* At this point no users can be added to the interface. */

	/*
	 * Call all the watcher interfaces to tell them that
	 * an interface is going away.
	 */
	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link)
		w->smi_gone(intf_num);
	mutex_unlock(&smi_watchers_mutex);

	index = srcu_read_lock(&intf->users_srcu);
	while (!list_empty(&intf->users)) {
		struct ipmi_user *user =
			container_of(list_next_rcu(&intf->users),
				     struct ipmi_user, link);

		_ipmi_destroy_user(user);
	}
	srcu_read_unlock(&intf->users_srcu, index);

	if (intf->handlers->shutdown)
		intf->handlers->shutdown(intf->send_info);

	cleanup_smi_msgs(intf);

	ipmi_bmc_unregister(intf);

	cleanup_srcu_struct(&intf->users_srcu);
	kref_put(&intf->refcount, intf_free);
}
EXPORT_SYMBOL(ipmi_unregister_smi);
L
Linus Torvalds 已提交
3615

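/*
 * Handle an IPMB response that arrived via a "Get Message": look up the
 * matching sequence-table entry and deliver it to the requesting user.
 */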
static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
				   struct ipmi_smi_msg *msg)
{
	struct ipmi_ipmb_addr ipmb_addr;
	struct ipmi_recv_msg  *recv_msg;

	/*
	 * This is 11, not 10, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 11) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_ipmb_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.slave_addr = msg->rsp[6];
	ipmb_addr.channel = msg->rsp[3] & 0x0f;
	ipmb_addr.lun = msg->rsp[7] & 3;

	/*
	 * It's a response from a remote entity.  Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[7] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[8],
			  (msg->rsp[4] >> 2) & (~1),
			  (struct ipmi_addr *) &ipmb_addr,
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_ipmb_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[4] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 10;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (deliver_response(intf, recv_msg))
		ipmi_inc_stat(intf, unhandled_ipmb_responses);
	else
		ipmi_inc_stat(intf, handled_ipmb_responses);

	return 0;
}

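/*
 * Handle a command that arrived over IPMB: hand it to the registered
 * command receiver, or answer it with an "invalid command" completion
 * code if nobody has registered for it.
 */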
static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
				   struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr          *rcvr;
	int                      rv = 0;
	unsigned char            netfn;
	unsigned char            cmd;
	unsigned char            chan;
	struct ipmi_user         *user = NULL;
	struct ipmi_ipmb_addr    *ipmb_addr;
	struct ipmi_recv_msg     *recv_msg;

	if (msg->rsp_size < 10) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[4] >> 2;
	cmd = msg->rsp[8];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		ipmi_inc_stat(intf, unhandled_commands);

		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = msg->rsp[3];
		msg->data[3] = msg->rsp[6];
		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
		msg->data[5] = ipmb_checksum(&msg->data[3], 2);
		msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
		/* rqseq/lun */
		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
		msg->data[8] = msg->rsp[8]; /* cmd */
		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[10] = ipmb_checksum(&msg->data[6], 4);
		msg->data_size = 11;

		ipmi_debug_msg("Invalid command:", msg->data, msg->data_size);

		rcu_read_lock();
		if (!intf->in_shutdown) {
			smi_send(intf, intf->handlers, msg, 0);
			/*
			 * We used the message, so return the value
			 * that causes it to not be freed or
			 * queued.
			 */
			rv = -1;
		}
		rcu_read_unlock();
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb_addr->slave_addr = msg->rsp[6];
			ipmb_addr->lun = msg->rsp[7] & 3;
			ipmb_addr->channel = msg->rsp[3] & 0xf;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[7] >> 2;
			recv_msg->msg.netfn = msg->rsp[4] >> 2;
			recv_msg->msg.cmd = msg->rsp[8];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 10, not 9 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 10;
			memcpy(recv_msg->msg_data, &msg->rsp[9],
			       msg->rsp_size - 10);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}

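/*
 * Handle a LAN response that arrived via a "Get Message": look up the
 * matching sequence-table entry and deliver it to the requesting user.
 */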
static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct ipmi_lan_addr  lan_addr;
	struct ipmi_recv_msg  *recv_msg;

	/*
	 * This is 13, not 12, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 13) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_lan_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
	lan_addr.session_handle = msg->rsp[4];
	lan_addr.remote_SWID = msg->rsp[8];
	lan_addr.local_SWID = msg->rsp[5];
	lan_addr.channel = msg->rsp[3] & 0x0f;
	lan_addr.privilege = msg->rsp[3] >> 4;
	lan_addr.lun = msg->rsp[9] & 3;

	/*
	 * It's a response from a remote entity.  Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[9] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[10],
			  (msg->rsp[6] >> 2) & (~1),
			  (struct ipmi_addr *) &lan_addr,
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_lan_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[6] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 12;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (deliver_response(intf, recv_msg))
		ipmi_inc_stat(intf, unhandled_lan_responses);
	else
		ipmi_inc_stat(intf, handled_lan_responses);

	return 0;
}

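/*
 * Handle a command that arrived over a LAN channel: hand it to the
 * registered command receiver, or let it be freed if nobody has
 * registered for it.
 */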
static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr          *rcvr;
	int                      rv = 0;
	unsigned char            netfn;
	unsigned char            cmd;
	unsigned char            chan;
	struct ipmi_user         *user = NULL;
	struct ipmi_lan_addr     *lan_addr;
	struct ipmi_recv_msg     *recv_msg;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */
		rv = 0;
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 12, not 11 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data, &msg->rsp[11],
			       msg->rsp_size - 12);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}

/*
 * This routine will handle "Get Message" command responses with
 * channels that use an OEM Medium. The message format belongs to
 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
 * Chapter 22, sections 22.6 and 22.24 for more details.
 */
static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr       *rcvr;
	int                   rv = 0;
	unsigned char         netfn;
	unsigned char         cmd;
	unsigned char         chan;
	struct ipmi_user *user = NULL;
	struct ipmi_system_interface_addr *smi_addr;
	struct ipmi_recv_msg  *recv_msg;

	/*
	 * We expect the OEM SW to perform error checking
	 * so we just do some basic sanity checks
	 */
	if (msg->rsp_size < 4) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/*
	 * This is an OEM message, so the OEM needs to know how to
	 * handle it.  We do no interpretation.
	 */
	netfn = msg->rsp[0] >> 2;
	cmd = msg->rsp[1];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */

		rv = 0;
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/*
			 * OEM Messages are expected to be delivered via
			 * the system interface to SMS software.  We might
			 * need to visit this again depending on OEM
			 * requirements
			 */
			smi_addr = ((struct ipmi_system_interface_addr *)
				    &recv_msg->addr);
			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			smi_addr->channel = IPMI_BMC_CHANNEL;
			smi_addr->lun = msg->rsp[0] & 3;

			recv_msg->user = user;
			recv_msg->user_msg_data = NULL;
			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
			recv_msg->msg.netfn = msg->rsp[0] >> 2;
			recv_msg->msg.cmd = msg->rsp[1];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * The message starts at byte 4, which follows the
			 * Channel Byte in the "GET MESSAGE" command.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 4;
			memcpy(recv_msg->msg_data, &msg->rsp[4],
			       msg->rsp_size - 4);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}

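/*
 * Copy an asynchronous event out of an SMI message into a receive
 * message addressed to the local system interface.
 */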
static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
				     struct ipmi_smi_msg  *msg)
{
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg->msgid = 0;
	smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 3;
}

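/*
 * Handle the response to a "Read Event Message Buffer" command: deliver
 * the event to every user that asked for events, or park it in the
 * interface's event queue if nobody is currently listening.
 */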
static int handle_read_event_rsp(struct ipmi_smi *intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head     msgs;
	struct ipmi_user     *user;
	int rv = 0, deliver_count = 0, index;
	unsigned long        flags;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		ipmi_inc_stat(intf, invalid_events);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);

	ipmi_inc_stat(intf, events);

	/*
	 * Allocate and fill in one message for every user that is
	 * getting events.
	 */
	index = srcu_read_lock(&intf->users_srcu);
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			srcu_read_unlock(&intf->users_srcu, index);
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			}
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		kref_get(&user->refcount);
		list_add_tail(&recv_msg->link, &msgs);
	}
	srcu_read_unlock(&intf->users_srcu, index);

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_local_response(intf, recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/*
		 * No one to receive the message, put it in queue if there's
		 * not already too many things in the queue.
		 */
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&recv_msg->link, &intf->waiting_events);
		intf->waiting_events_count++;
	} else if (!intf->event_msg_printed) {
		/*
		 * There's too many things in the queue, discard this
		 * message.
		 */
		dev_warn(intf->si_dev,
			 "Event queue full, discarding incoming events\n");
		intf->event_msg_printed = 1;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);

	return rv;
}

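/*
 * Deliver a response from the local BMC back to the user that issued
 * the original request.
 */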
static int handle_bmc_rsp(struct ipmi_smi *intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL) {
		dev_warn(intf->si_dev,
			 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
		return 0;
	}

	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	recv_msg->msgid = msg->msgid;
	smi_addr = ((struct ipmi_system_interface_addr *)
		    &recv_msg->addr);
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 2;
	deliver_local_response(intf, recv_msg);

	return 0;
}

/*
 * Handle a received message.  Return 1 if the message should be requeued,
 * 0 if the message should be freed, or -1 if the message should not
 * be freed or requeued.
 */
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue;
	int chan;

	ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);

	if ((msg->data_size >= 2)
	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data == NULL)) {

		if (intf->in_shutdown)
			goto free_msg;

		/*
		 * This is the local response to a command send, start
		 * the timer for these.  The user_data will not be
		 * NULL if this is a response send, and we will let
		 * response sends just go through.
		 */

		/*
		 * Check for errors, if we get certain errors (ones
		 * that mean basically we can try again later), we
		 * ignore them and start the timer.  Otherwise we
		 * report the error immediately.
		 */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
			int ch = msg->rsp[3] & 0xf;
			struct ipmi_channel *chans;

			/* Got an error sending the message, handle it. */

			chans = READ_ONCE(intf->channel_list)->c;
			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
				ipmi_inc_stat(intf, sent_lan_command_errs);
			else
				ipmi_inc_stat(intf, sent_ipmb_command_errs);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);
free_msg:
		requeue = 0;
		goto out;

	} else if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		dev_warn(intf->si_dev,
			 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
			 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
		   || (msg->rsp[1] != msg->data[1])) {
		/*
		 * The NetFN and Command in the response is not even
		 * marginally correct.
		 */
		dev_warn(intf->si_dev,
			 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
			 (msg->data[0] >> 2) | 1, msg->data[1],
			 msg->rsp[0] >> 2, msg->rsp[1]);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	}

	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data != NULL)) {
		/*
		 * It's a response to a response we sent.  For this we
		 * deliver a send message response to the user.
		 */
		struct ipmi_recv_msg *recv_msg = msg->user_data;

		requeue = 0;
		if (msg->rsp_size < 2)
			/* Message is too small to be correct. */
			goto out;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;

		if (!recv_msg)
			goto out;

		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = 1;
		recv_msg->msg_data[0] = msg->rsp[2];
		deliver_local_response(intf, recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
		struct ipmi_channel   *chans;

		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;
		}

		/*
		 * We need to make sure the channels have been initialized.
		 * The channel_handler routine will set the "curr_channel"
		 * equal to or greater than IPMI_MAX_CHANNELS when all the
		 * channels for this interface have been initialized.
		 */
		if (!intf->channels_ready) {
			requeue = 0; /* Throw the message away */
			goto out;
		}

		chans = READ_ONCE(intf->channel_list)->c;

		switch (chans[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
			}
			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			}
			break;

		default:
			/*
			 * Check for OEM Channels.  Clients had better
			 * register for these commands.
			 */
			if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
			    && (chans[chan].medium
				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
				requeue = handle_oem_get_msg_cmd(intf, msg);
			} else {
				/*
				 * We don't handle the channel type, so just
				 * free the message.
				 */
				requeue = 0;
			}
		}

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
		/* It's an asynchronous event. */
		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

 out:
	return requeue;
}

/*
 * If there are messages in the queue or pretimeouts, handle them.
 */
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
	struct ipmi_smi_msg  *smi_msg;
	unsigned long        flags = 0;
	int                  rv;
	int                  run_to_completion = intf->run_to_completion;

	/* See if any waiting messages need to be processed. */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	while (!list_empty(&intf->waiting_rcv_msgs)) {
		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
				     struct ipmi_smi_msg, link);
		list_del(&smi_msg->link);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
					       flags);
		rv = handle_one_recv_msg(intf, smi_msg);
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
		if (rv > 0) {
			/*
			 * To preserve message order, quit if we
			 * can't handle a message.  Add the message
			 * back at the head, this is safe because this
			 * tasklet is the only thing that pulls the
			 * messages.
			 */
			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
			break;
		} else {
			if (rv == 0)
				/* Message handled */
				ipmi_free_smi_msg(smi_msg);
			/* If rv < 0, fatal error, del but don't free. */
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);

	/*
	 * If the pretimeout count is non-zero, decrement one from it and
	 * deliver pretimeouts to all the users.
	 */
	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
		struct ipmi_user *user;
		int index;

		index = srcu_read_lock(&intf->users_srcu);
		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_watchdog_pretimeout)
				user->handler->ipmi_watchdog_pretimeout(
					user->handler_data);
		}
		srcu_read_unlock(&intf->users_srcu, index);
	}
}

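/*
 * Tasklet that starts the next queued outgoing message, if any, and
 * then processes newly received messages.
 */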
static void smi_recv_tasklet(unsigned long val)
{
	unsigned long flags = 0; /* keep us warning-free. */
	struct ipmi_smi *intf = (struct ipmi_smi *) val;
	int run_to_completion = intf->run_to_completion;
	struct ipmi_smi_msg *newmsg = NULL;

	/*
	 * Start the next message if available.
	 *
	 * Do this here, not in the actual receiver, because we may deadlock
	 * because the lower layer is allowed to hold locks while calling
	 * message delivery.
	 */

	rcu_read_lock();

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (intf->curr_msg == NULL && !intf->in_shutdown) {
		struct list_head *entry = NULL;

		/* Pick the high priority queue first. */
		if (!list_empty(&intf->hp_xmit_msgs))
			entry = intf->hp_xmit_msgs.next;
		else if (!list_empty(&intf->xmit_msgs))
			entry = intf->xmit_msgs.next;

		if (entry) {
			list_del(entry);
			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
			intf->curr_msg = newmsg;
		}
	}

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
	if (newmsg)
		intf->handlers->sender(intf->send_info, newmsg);

	rcu_read_unlock();

	handle_new_recv_msgs(intf);
}

/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(struct ipmi_smi *intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free. */
	int run_to_completion = intf->run_to_completion;

	/*
	 * To preserve message order, we keep a queue and deliver from
	 * a tasklet.
	 */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
				       flags);

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	/*
	 * We can get an asynchronous event or receive message in addition
	 * to commands we send.
	 */
	if (msg == intf->curr_msg)
		intf->curr_msg = NULL;
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (run_to_completion)
		smi_recv_tasklet((unsigned long) intf);
	else
		tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);

void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
{
	if (intf->in_shutdown)
		return;

	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
	tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);

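/*
 * Rebuild an SMI message from a receive message so the request can be
 * retransmitted under the given sequence number.
 */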
static struct ipmi_smi_msg *
smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
		  unsigned char seq, long seqid)
{
	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
	if (!smi_msg)
		/*
		 * If we can't allocate the message, then just return, we
		 * get 4 retries, so this should be ok.
		 */
		return NULL;

	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
	smi_msg->data_size = recv_msg->msg.data_len;
	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);

	ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size);

	return smi_msg;
}

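/*
 * Age one sequence-table entry: let its timeout run down, retransmit
 * the request while retries remain, and move it to the timeouts list
 * once the retries are exhausted.
 */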
static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
			      struct list_head *timeouts,
			      unsigned long timeout_period,
			      int slot, unsigned long *flags,
			      bool *need_timer)
{
	struct ipmi_recv_msg *msg;

	if (intf->in_shutdown)
		return;

	if (!ent->inuse)
		return;

	if (timeout_period < ent->timeout) {
		ent->timeout -= timeout_period;
		*need_timer = true;
		return;
	}

	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		if (ent->broadcast)
			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
		else if (is_lan_addr(&ent->recv_msg->addr))
			ipmi_inc_stat(intf, timed_out_lan_commands);
		else
			ipmi_inc_stat(intf, timed_out_ipmb_commands);
	} else {
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */

		*need_timer = true;

		/*
		 * Start with the max timer, set to normal timer after
		 * the message is sent.
		 */
		ent->timeout = MAX_MSG_TIMEOUT;
		ent->retries_left--;
		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
					    ent->seqid);
		if (!smi_msg) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      dropped_rexmit_lan_commands);
			else
				ipmi_inc_stat(intf,
					      dropped_rexmit_ipmb_commands);
			return;
		}

		spin_unlock_irqrestore(&intf->seq_lock, *flags);

		/*
		 * Send the new message.  We send with a zero
		 * priority.  It timed out, I doubt time is that
		 * critical now, and high priority messages are really
		 * only for messages to the local MC, which don't get
		 * resent.
		 */
		if (intf->handlers) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      retransmitted_lan_commands);
			else
				ipmi_inc_stat(intf,
					      retransmitted_ipmb_commands);

			smi_send(intf, intf->handlers, smi_msg, 0);
		} else
			ipmi_free_smi_msg(smi_msg);

		spin_lock_irqsave(&intf->seq_lock, *flags);
	}
}

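/*
 * Per-interface periodic work: time out or retransmit pending requests
 * and run down the maintenance-mode timers.  Returns true if the
 * interface still needs the periodic timer.
 */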
static bool ipmi_timeout_handler(struct ipmi_smi *intf,
				 unsigned long timeout_period)
{
	struct list_head     timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	unsigned long        flags;
	int                  i;
	bool                 need_timer = false;

	if (!intf->bmc_registered) {
		kref_get(&intf->refcount);
		if (!schedule_work(&intf->bmc_reg_work)) {
			kref_put(&intf->refcount, intf_free);
			need_timer = true;
		}
	}

	/*
	 * Go through the seq table and find any messages that
	 * have timed out, putting them in the timeouts
	 * list.
	 */
	INIT_LIST_HEAD(&timeouts);
	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->ipmb_maintenance_mode_timeout) {
		if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
			intf->ipmb_maintenance_mode_timeout = 0;
		else
			intf->ipmb_maintenance_mode_timeout -= timeout_period;
	}
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
		check_msg_timeout(intf, &intf->seq_table[i],
				  &timeouts, timeout_period, i,
				  &flags, &need_timer);
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	list_for_each_entry_safe(msg, msg2, &timeouts, link)
		deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);

	/*
	 * Maintenance mode handling.  Check the timeout
	 * optimistically before we claim the lock.  It may
	 * mean a timeout gets missed occasionally, but that
	 * only means the timeout gets extended by one period
	 * in that case.  No big deal, and it avoids the lock
	 * most of the time.
	 */
	if (intf->auto_maintenance_timeout > 0) {
		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		if (intf->auto_maintenance_timeout > 0) {
			intf->auto_maintenance_timeout
				-= timeout_period;
			if (!intf->maintenance_mode
			    && (intf->auto_maintenance_timeout <= 0)) {
				intf->maintenance_mode_enable = false;
				maintenance_mode_update(intf);
			}
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	tasklet_schedule(&intf->recv_tasklet);

	return need_timer;
}

static void ipmi_request_event(struct ipmi_smi *intf)
{
	/* No event requests when in maintenance mode. */
	if (intf->maintenance_mode_enable)
		return;

	if (!intf->in_shutdown)
		intf->handlers->request_events(intf->send_info);
}

static struct timer_list ipmi_timer;

static atomic_t stop_operation;

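/*
 * Periodic timer: drive event requests and message timeouts for every
 * registered interface, re-arming itself only while some interface
 * still needs it.
 */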
static void ipmi_timeout(struct timer_list *unused)
{
	struct ipmi_smi *intf;
	bool need_timer = false;
	int index;

	if (atomic_read(&stop_operation))
		return;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (atomic_read(&intf->event_waiters)) {
			intf->ticks_to_req_ev--;
			if (intf->ticks_to_req_ev == 0) {
				ipmi_request_event(intf);
				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
			}
			need_timer = true;
		}

		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	if (need_timer)
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static void need_waiter(struct ipmi_smi *intf)
{
	/* Racy, but worst case we start the timer twice. */
	if (!timer_pending(&ipmi_timer))
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;
	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->user_data = NULL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);

static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	kfree(msg);
}

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
	struct ipmi_recv_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (rv) {
		rv->user = NULL;
		rv->done = free_recv_msg;
		atomic_inc(&recv_msg_inuse_count);
	}
	return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);

static atomic_t panic_done_count = ATOMIC_INIT(0);

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}

/*
 * Inside a panic, send a message and wait for a response.
 */
static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
					struct ipmi_addr *addr,
					struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg  smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->addrinfo[0].address,
			    intf->addrinfo[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}

static void event_receiver_fetcher(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}

static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/*
		 * A get device id command, save if we are an event
		 * receiver or generator.
		 */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}

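/*
 * On panic, log an "OS Critical Stop" event to the local management
 * controller and, if configured, follow it with OEM SEL records that
 * carry the panic string.
 */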
static void send_panic_events(struct ipmi_smi *intf, char *str)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	char *p = str;
	struct ipmi_ipmb_addr *ipmb;
	int j;

	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
		return;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in.  Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	/* Send the event announcing the panic. */
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	/*
	 * On every interface, dump a bunch of OEM events holding the
	 * string.
	 */
	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
		return;

	/*
	 * intf_num is used as a marker to tell if the
	 * interface is valid.  Thus we need a read barrier to
	 * make sure data fetched before checking intf_num
	 * won't be used.
	 */
	smp_rmb();

	/*
	 * First job here is to figure out where to send the
	 * OEM events.  There's no way in IPMI to send OEM
	 * events using an event send command, so we have to
	 * find the SEL to put them in and stick them in
	 * there.
	 */

	/* Get capabilities from the get device id. */
	intf->local_sel_device = 0;
	intf->local_event_generator = 0;
	intf->event_receiver = 0;

	/* Request the device info from the local MC. */
	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	intf->null_user_handler = device_id_fetcher;
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	if (intf->local_event_generator) {
		/* Request the event receiver from the local MC. */
		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = event_receiver_fetcher;
		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
	intf->null_user_handler = NULL;

	/*
	 * Validate the event receiver.  The low bit must not
	 * be 1 (it must be a valid IPMB address), it cannot
	 * be zero, and it must not be my address.
	 */
	if (((intf->event_receiver & 1) == 0)
	    && (intf->event_receiver != 0)
	    && (intf->event_receiver != intf->addrinfo[0].address)) {
		/*
		 * The event receiver is valid, send an IPMB
		 * message.
		 */
		ipmb = (struct ipmi_ipmb_addr *) &addr;
		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
		ipmb->channel = 0; /* FIXME - is this right? */
		ipmb->lun = intf->event_receiver_lun;
		ipmb->slave_addr = intf->event_receiver;
	} else if (intf->local_sel_device) {
		/*
		 * The event receiver was not valid (or was
		 * me), but I am an SEL device, just dump it
		 * in my SEL.
		 */
		si = (struct ipmi_system_interface_addr *) &addr;
		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		si->channel = IPMI_BMC_CHANNEL;
		si->lun = 0;
	} else
		return; /* Nowhere to send the event. */

	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
	msg.data = data;
	msg.data_len = 16;

	j = 0;
	while (*p) {
		int size = strlen(p);

		if (size > 11)
			size = 11;
		data[0] = 0;
		data[1] = 0;
		data[2] = 0xf0; /* OEM event without timestamp. */
		data[3] = intf->addrinfo[0].address;
		data[4] = j++; /* sequence # */
		/*
		 * Always give 11 bytes, so strncpy will fill
		 * it with zeroes for me.
		 */
		strncpy(data+5, p, 11);
		p += size;

		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
}

static int has_panicked;

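/*
 * Panic notifier: put every ready interface into polled, run-to-completion
 * operation, call any registered panic handlers, and send the panic events.
 */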
static int panic_event(struct notifier_block *this,
		       unsigned long         event,
		       void                  *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted.  In this case, drop items on the list for
		 * the safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);

		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_panic_handler)
				user->handler->ipmi_panic_handler(
					user->handler_data);
		}

		send_panic_events(intf, ptr);
	}

	return NOTIFY_DONE;
}

/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
	int rv;

	if (drvregistered)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv)
		pr_err("Could not register IPMI driver\n");
	else
		drvregistered = true;
	return rv;
}

static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
};

static int ipmi_init_msghandler(void)
{
	int rv;

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;

	init_srcu_struct(&ipmi_interfaces_srcu);

	timer_setup(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = true;

out:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}

static int __init ipmi_init_msghandler_mod(void)
{
	int rv;

	pr_info("version " IPMI_DRIVER_VERSION "\n");

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}

static void __exit cleanup_ipmi(void)
{
	int count;

	if (initialized) {
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);

		/*
		 * This can't be called if any interfaces exist, so no worry
		 * about shutting down the interfaces.
		 */

		/*
		 * Tell the timer to stop, then wait for it to stop.  This
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
		atomic_set(&stop_operation, 1);
		del_timer_sync(&ipmi_timer);

		initialized = false;

		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
			pr_warn("recv message count %d at exit\n", count);

		cleanup_srcu_struct(&ipmi_interfaces_srcu);
	}
	if (drvregistered)
		driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
		   " interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");