ipmi_msghandler.c 130.7 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0+
L
Linus Torvalds 已提交
2 3 4 5 6 7 8 9 10 11 12 13
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

14 15 16
#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
#define dev_fmt pr_fmt

L
Linus Torvalds 已提交
17 18 19
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
20
#include <linux/sched.h>
21
#include <linux/seq_file.h>
L
Linus Torvalds 已提交
22
#include <linux/spinlock.h>
23
#include <linux/mutex.h>
L
Linus Torvalds 已提交
24 25 26 27 28 29
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
30
#include <linux/rcupdate.h>
31
#include <linux/interrupt.h>
32
#include <linux/moduleparam.h>
33
#include <linux/workqueue.h>
34
#include <linux/uuid.h>
35
#include <linux/nospec.h>
L
Linus Torvalds 已提交
36

C
Corey Minyard 已提交
37
#define IPMI_DRIVER_VERSION "39.2"
L
Linus Torvalds 已提交
38 39 40

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
41
static void smi_recv_tasklet(unsigned long);
42 43 44
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
45
			       struct ipmi_smi_msg *msg);
L
Linus Torvalds 已提交
46

47 48
static bool initialized;
static bool drvregistered;
L
Linus Torvalds 已提交
49

50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69
enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING
};
#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif
static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	char *s;

X
Xiongfeng Wang 已提交
70
	strncpy(valcp, val, 15);
71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117
	valcp[15] = '\0';

	s = strstrip(valcp);

	if (strcmp(s, "none") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
	else if (strcmp(s, "event") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
	else if (strcmp(s, "string") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
	else
		return -EINVAL;

	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	switch (ipmi_send_panic_event) {
	case IPMI_SEND_PANIC_EVENT_NONE:
		strcpy(buffer, "none");
		break;

	case IPMI_SEND_PANIC_EVENT:
		strcpy(buffer, "event");
		break;

	case IPMI_SEND_PANIC_EVENT_STRING:
		strcpy(buffer, "string");
		break;

	default:
		strcpy(buffer, "???");
		break;
	}

	return strlen(buffer);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");


L
Linus Torvalds 已提交
118 119
#define MAX_EVENTS_IN_QUEUE	25

120 121 122 123 124 125
/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

126 127 128 129
/*
 * Don't let a message sit in a queue forever, always time it with at lest
 * the max message timer.  This is in milliseconds.
 */
L
Linus Torvalds 已提交
130 131
#define MAX_MSG_TIMEOUT		60000

132 133 134 135 136 137 138 139 140 141 142 143 144
/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting it 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

145 146 147 148 149 150
/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

151 152 153 154 155 156
/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The time (milliseconds) between retry sends in maintenance mode");

157 158 159 160 161 162 163 164 165 166 167 168 169 170
/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

171 172 173
/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)

174 175 176
/*
 * The main "user" data structure.
 */
177
struct ipmi_user {
L
Linus Torvalds 已提交
178 179
	struct list_head link;

180 181 182 183 184 185
	/*
	 * Set to NULL when the user is destroyed, a pointer to myself
	 * so srcu_dereference can be used on it.
	 */
	struct ipmi_user *self;
	struct srcu_struct release_barrier;
186 187 188

	struct kref refcount;

L
Linus Torvalds 已提交
189
	/* The upper layer that handles receive messages. */
C
Corey Minyard 已提交
190
	const struct ipmi_user_hndl *handler;
L
Linus Torvalds 已提交
191 192 193
	void             *handler_data;

	/* The interface this user is bound to. */
194
	struct ipmi_smi *intf;
L
Linus Torvalds 已提交
195 196

	/* Does this interface receive IPMI events? */
197
	bool gets_events;
198 199 200

	/* Free must run in process context for RCU cleanup. */
	struct work_struct remove_work;
L
Linus Torvalds 已提交
201 202
};

203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
{
	struct ipmi_user *ruser;

	*index = srcu_read_lock(&user->release_barrier);
	ruser = srcu_dereference(user->self, &user->release_barrier);
	if (!ruser)
		srcu_read_unlock(&user->release_barrier, *index);
	return ruser;
}

static void release_ipmi_user(struct ipmi_user *user, int index)
{
	srcu_read_unlock(&user->release_barrier, index);
}

220
struct cmd_rcvr {
L
Linus Torvalds 已提交
221 222
	struct list_head link;

223
	struct ipmi_user *user;
L
Linus Torvalds 已提交
224 225
	unsigned char netfn;
	unsigned char cmd;
226
	unsigned int  chans;
227 228 229 230 231 232 233

	/*
	 * This is used to form a linked lised during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
234 235
	 * every individual deletion of a command.
	 */
236
	struct cmd_rcvr *next;
L
Linus Torvalds 已提交
237 238
};

239
struct seq_table {
L
Linus Torvalds 已提交
240 241 242 243 244 245 246
	unsigned int         inuse : 1;
	unsigned int         broadcast : 1;

	unsigned long        timeout;
	unsigned long        orig_timeout;
	unsigned int         retries_left;

247 248 249 250 251
	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
L
Linus Torvalds 已提交
252 253
	long                 seqid;

254 255 256 257 258
	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
L
Linus Torvalds 已提交
259 260 261
	struct ipmi_recv_msg *recv_msg;
};

262 263 264 265
/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
C
Corey Minyard 已提交
266 267
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
L
Linus Torvalds 已提交
268 269 270

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do {								\
C
Corey Minyard 已提交
271 272
		seq = (((msgid) >> 26) & 0x3f);				\
		seqid = ((msgid) & 0x3ffffff);				\
273
	} while (0)
L
Linus Torvalds 已提交
274

C
Corey Minyard 已提交
275
#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
L
Linus Torvalds 已提交
276

277
#define IPMI_MAX_CHANNELS       16
278
struct ipmi_channel {
L
Linus Torvalds 已提交
279 280
	unsigned char medium;
	unsigned char protocol;
281
};
282

283 284 285 286
struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

287
struct ipmi_my_addrinfo {
288 289 290 291
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
292 293
	unsigned char address;

294 295 296 297
	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
298
	unsigned char lun;
L
Linus Torvalds 已提交
299 300
};

301 302 303 304 305
/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
306
struct bmc_device {
307
	struct platform_device pdev;
308
	struct list_head       intfs; /* Interfaces on this BMC. */
309 310 311 312
	struct ipmi_device_id  id;
	struct ipmi_device_id  fetch_id;
	int                    dyn_id_set;
	unsigned long          dyn_id_expiry;
313
	struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
314 315
	guid_t                 guid;
	guid_t                 fetch_guid;
316
	int                    dyn_guid_set;
317
	struct kref	       usecount;
318
	struct work_struct     remove_work;
319
};
320
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
321

322
static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
323
			     struct ipmi_device_id *id,
324
			     bool *guid_set, guid_t *guid);
325

326 327 328 329
/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
330 331 332
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,
333

334 335
	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,
336

337 338
	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,
339

340 341
	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,
342

343 344
	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,
345

346 347
	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,
348

349 350
	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,
351

352 353 354 355 356
	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,
357

358 359 360 361 362 363
	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,
364

365 366
	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,
367

368 369
	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,
370

371 372
	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,
373

374 375
	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,
376

377 378
	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_lan_commands,
379

380 381
	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,
382

383 384
	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,
385

386 387 388 389 390 391 392 393
	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_lan_responses,
394

395 396
	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,
397

398 399
	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,
400

401 402
	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,
403

404 405
	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,
406

407 408
	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,
409

410 411
	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,
412

413 414
	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,
415

416 417
	/* Events that were received with the proper format. */
	IPMI_STAT_events,
418

419 420 421 422 423
	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,
424

425 426 427
	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};
428 429


L
Linus Torvalds 已提交
430
#define IPMI_IPMB_NUM_SEQ	64
431
struct ipmi_smi {
432 433
	struct module *owner;

L
Linus Torvalds 已提交
434 435 436
	/* What interface number are we? */
	int intf_num;

437 438
	struct kref refcount;

439 440 441
	/* Set when the interface is being unregistered. */
	bool in_shutdown;

442 443 444
	/* Used for a list of interfaces. */
	struct list_head link;

445
	/*
446 447
	 * The list of upper layers that are using me.  seq_lock write
	 * protects this.  Read protection is with srcu.
448
	 */
449
	struct list_head users;
450
	struct srcu_struct users_srcu;
L
Linus Torvalds 已提交
451 452 453 454

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

455 456 457 458 459 460 461
	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

462
	struct bmc_device tmp_bmc;
463
	struct bmc_device *bmc;
C
Corey Minyard 已提交
464
	bool bmc_registered;
465
	struct list_head bmc_link;
466
	char *my_dev_name;
467
	bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
468
	struct work_struct bmc_reg_work;
L
Linus Torvalds 已提交
469

470
	const struct ipmi_smi_handlers *handlers;
L
Linus Torvalds 已提交
471 472
	void                     *send_info;

473 474 475
	/* Driver-model device for the system interface. */
	struct device          *si_dev;

476 477 478 479 480 481
	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
L
Linus Torvalds 已提交
482 483 484 485
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

486
	/*
487 488 489 490
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), They will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
491
	 */
492 493
	spinlock_t       waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
494 495
	atomic_t	 watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;
L
Linus Torvalds 已提交
496

497 498 499 500 501
	spinlock_t             xmit_msgs_lock;
	struct list_head       xmit_msgs;
	struct ipmi_smi_msg    *curr_msg;
	struct list_head       hp_xmit_msgs;

502 503 504 505
	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
506
	struct mutex     cmd_rcvrs_mutex;
L
Linus Torvalds 已提交
507 508
	struct list_head cmd_rcvrs;

509 510 511 512
	/*
	 * Events that were queues because no one was there to receive
	 * them.
	 */
L
Linus Torvalds 已提交
513 514 515
	spinlock_t       events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */
516 517
	char             delivering_events;
	char             event_msg_printed;
518 519

	/* How many users are waiting for events? */
520 521
	atomic_t         event_waiters;
	unsigned int     ticks_to_req_ev;
522

523 524
	spinlock_t       watch_lock; /* For dealing with watch stuff below. */

525
	/* How many users are waiting for commands? */
526
	unsigned int     command_waiters;
527 528

	/* How many users are waiting for watchdogs? */
529 530 531 532
	unsigned int     watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int     response_waiters;
533 534 535

	/*
	 * Tells what the lower layer has last been asked to watch for,
536
	 * messages and/or watchdogs.  Protected by watch_lock.
537 538
	 */
	unsigned int     last_watch_mask;
L
Linus Torvalds 已提交
539

540 541 542 543
	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
L
Linus Torvalds 已提交
544 545 546 547 548
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

C
Corey Minyard 已提交
549 550
	/* For handling of maintenance mode. */
	int maintenance_mode;
C
Corey Minyard 已提交
551
	bool maintenance_mode_enable;
C
Corey Minyard 已提交
552 553 554
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

555 556 557 558 559 560 561
	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

562 563 564 565 566
	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
567
	 *
568
	 * Protected by bmc_reg_mutex.
569
	 */
570 571
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);
L
Linus Torvalds 已提交
572

573 574 575 576
	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
L
Linus Torvalds 已提交
577 578 579
	int curr_channel;

	/* Channel information */
580 581 582
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
583
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
584
	bool channels_ready;
L
Linus Torvalds 已提交
585

586
	atomic_t stats[IPMI_NUM_STATS];
587 588 589 590 591 592 593

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures. Used to decrease numbers of
	 * parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
L
Linus Torvalds 已提交
594
};
595
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
L
Linus Torvalds 已提交
596

597 598 599
static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
600
			       struct ipmi_device_id *id,
601
			       bool guid_set, guid_t *guid, int intf_num);
602
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
603

604

605 606 607
/**
 * The driver model view of the IPMI messaging driver.
 */
608 609 610 611 612
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
613
};
614
/*
615
 * This mutex keeps us from adding the same BMC twice.
616
 */
617 618
static DEFINE_MUTEX(ipmidriver_mutex);

619
static LIST_HEAD(ipmi_interfaces);
620
static DEFINE_MUTEX(ipmi_interfaces_mutex);
621
static struct srcu_struct ipmi_interfaces_srcu;
L
Linus Torvalds 已提交
622

623 624 625
/*
 * List of watchers that want to know when smi's are added and deleted.
 */
626
static LIST_HEAD(smi_watchers);
627
static DEFINE_MUTEX(smi_watchers_mutex);
L
Linus Torvalds 已提交
628

629 630 631 632 633
#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

634 635
static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
636
	"device-tree", "platform"
637
};
638 639 640

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
641
	if (src >= SI_LAST)
642 643 644 645 646
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

647 648 649 650 651 652 653 654 655 656 657 658 659 660
static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}
661

662 663 664 665 666 667 668 669 670 671
static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

672 673 674 675 676 677 678 679 680 681
static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

682
static void clean_up_interface_data(struct ipmi_smi *intf)
683 684 685 686 687
{
	int              i;
	struct cmd_rcvr  *rcvr, *rcvr2;
	struct list_head list;

688 689
	tasklet_kill(&intf->recv_tasklet);

690
	free_smi_msg_list(&intf->waiting_rcv_msgs);
691 692
	free_recv_msg_list(&intf->waiting_events);

693 694 695 696
	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
697
	mutex_lock(&intf->cmd_rcvrs_mutex);
698 699
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
700
	mutex_unlock(&intf->cmd_rcvrs_mutex);
701 702 703 704 705 706

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
707
					&& (intf->seq_table[i].recv_msg))
708 709 710 711 712 713
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
714
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
715 716 717 718 719

	clean_up_interface_data(intf);
	kfree(intf);
}

720
struct watcher_entry {
721
	int              intf_num;
722
	struct ipmi_smi  *intf;
723 724 725
	struct list_head link;
};

L
Linus Torvalds 已提交
726 727
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
728
	struct ipmi_smi *intf;
729 730 731 732 733 734 735 736 737
	int index, rv;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;
738

739 740 741
	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);
742

743 744 745
	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		int intf_num = READ_ONCE(intf->intf_num);
746

747 748 749
		if (intf_num == -1)
			continue;
		watcher->new_smi(intf_num, intf->si_dev);
L
Linus Torvalds 已提交
750
	}
751
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
752

753
	mutex_unlock(&smi_watchers_mutex);
754

L
Linus Torvalds 已提交
755 756
	return 0;
}
757
EXPORT_SYMBOL(ipmi_smi_watcher_register);
L
Linus Torvalds 已提交
758 759 760

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
761
	mutex_lock(&smi_watchers_mutex);
762
	list_del(&watcher->link);
763
	mutex_unlock(&smi_watchers_mutex);
L
Linus Torvalds 已提交
764 765
	return 0;
}
766
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
L
Linus Torvalds 已提交
767

768 769 770
/*
 * Must be called with smi_watchers_mutex held.
 */
L
Linus Torvalds 已提交
771
static void
772
call_smi_watchers(int i, struct device *dev)
L
Linus Torvalds 已提交
773 774 775
{
	struct ipmi_smi_watcher *w;

776
	mutex_lock(&smi_watchers_mutex);
L
Linus Torvalds 已提交
777 778
	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
779
			w->new_smi(i, dev);
L
Linus Torvalds 已提交
780 781 782
			module_put(w->owner);
		}
	}
783
	mutex_unlock(&smi_watchers_mutex);
L
Linus Torvalds 已提交
784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

803
	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
L
Linus Torvalds 已提交
804 805 806 807 808 809 810 811 812
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

813
	if (is_lan_addr(addr1)) {
L
Linus Torvalds 已提交
814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
		    = (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
831
	if (len < sizeof(struct ipmi_system_interface_addr))
L
Linus Torvalds 已提交
832 833 834 835 836 837 838 839 840
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
841
	    || (addr->channel >= IPMI_MAX_CHANNELS)
L
Linus Torvalds 已提交
842 843 844
	    || (addr->channel < 0))
		return -EINVAL;

845
	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
846
		if (len < sizeof(struct ipmi_ipmb_addr))
L
Linus Torvalds 已提交
847 848 849 850
			return -EINVAL;
		return 0;
	}

851
	if (is_lan_addr(addr)) {
852
		if (len < sizeof(struct ipmi_lan_addr))
L
Linus Torvalds 已提交
853 854 855 856 857 858
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
859
EXPORT_SYMBOL(ipmi_validate_addr);
L
Linus Torvalds 已提交
860 861 862 863 864 865 866

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
867
			|| (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
L
Linus Torvalds 已提交
868 869 870 871 872 873 874
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
875
EXPORT_SYMBOL(ipmi_addr_length);
L
Linus Torvalds 已提交
876

C
Corey Minyard 已提交
877
static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
L
Linus Torvalds 已提交
878
{
C
Corey Minyard 已提交
879
	int rv = 0;
880

C
Corey Minyard 已提交
881
	if (!msg->user) {
882 883 884 885 886
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
C
Corey Minyard 已提交
887
			rv = -EINVAL;
888 889
		}
		ipmi_free_recv_msg(msg);
890
	} else if (oops_in_progress) {
891 892 893 894 895
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't much meaning and has a deadlock
		 * risk.  At this moment, simply skip it in that case.
		 */
896 897
		ipmi_free_recv_msg(msg);
	} else {
898 899
		int index;
		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
900

901 902
		if (user) {
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
903
			release_ipmi_user(user, index);
904 905 906 907 908
		} else {
			/* User went away, give up. */
			ipmi_free_recv_msg(msg);
			rv = -EINVAL;
		}
909
	}
C
Corey Minyard 已提交
910 911

	return rv;
L
Linus Torvalds 已提交
912 913
}

C
Corey Minyard 已提交
914 915 916 917 918 919 920 921 922 923 924
static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
925 926 927 928 929 930
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
C
Corey Minyard 已提交
931
	deliver_local_response(intf, msg);
932 933
}

934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991
static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

992 993 994 995 996
/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
997
static int intf_next_seq(struct ipmi_smi      *intf,
L
Linus Torvalds 已提交
998 999 1000 1001 1002 1003 1004 1005 1006 1007
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long        timeout,
			 int                  retries,
			 int                  broadcast,
			 unsigned char        *seq,
			 long                 *seqid)
{
	int          rv = 0;
	unsigned int i;

1008 1009 1010 1011 1012
	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

1013 1014
	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
					i = (i+1)%IPMI_IPMB_NUM_SEQ) {
1015
		if (!intf->seq_table[i].inuse)
L
Linus Torvalds 已提交
1016 1017 1018
			break;
	}

1019
	if (!intf->seq_table[i].inuse) {
L
Linus Torvalds 已提交
1020 1021
		intf->seq_table[i].recv_msg = recv_msg;

1022 1023 1024 1025
		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
L
Linus Torvalds 已提交
1026 1027 1028 1029 1030 1031 1032 1033 1034
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
1035
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1036
		need_waiter(intf);
L
Linus Torvalds 已提交
1037 1038 1039
	} else {
		rv = -EAGAIN;
	}
1040

L
Linus Torvalds 已提交
1041 1042 1043
	return rv;
}

1044 1045 1046 1047 1048 1049 1050
/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against message coming in after their timeout and the
 * sequence number being reused).
 */
1051
static int intf_find_seq(struct ipmi_smi      *intf,
L
Linus Torvalds 已提交
1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064
			 unsigned char        seq,
			 short                channel,
			 unsigned char        cmd,
			 unsigned char        netfn,
			 struct ipmi_addr     *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int           rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

1065
	spin_lock_irqsave(&intf->seq_lock, flags);
L
Linus Torvalds 已提交
1066 1067 1068
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

1069 1070
		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
				&& (msg->msg.netfn == netfn)
1071
				&& (ipmi_addr_equal(addr, &msg->addr))) {
L
Linus Torvalds 已提交
1072 1073
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
1074
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
L
Linus Torvalds 已提交
1075 1076 1077
			rv = 0;
		}
	}
1078
	spin_unlock_irqrestore(&intf->seq_lock, flags);
L
Linus Torvalds 已提交
1079 1080 1081 1082 1083 1084

	return rv;
}


/* Start the timer for a specific sequence table entry. */
1085
static int intf_start_seq_timer(struct ipmi_smi *intf,
L
Linus Torvalds 已提交
1086 1087 1088 1089 1090 1091 1092 1093 1094 1095
				long       msgid)
{
	int           rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

1096
	spin_lock_irqsave(&intf->seq_lock, flags);
1097 1098 1099 1100
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
L
Linus Torvalds 已提交
1101
	if ((intf->seq_table[seq].inuse)
1102
				&& (intf->seq_table[seq].seqid == seqid)) {
1103
		struct seq_table *ent = &intf->seq_table[seq];
L
Linus Torvalds 已提交
1104 1105 1106
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
1107
	spin_unlock_irqrestore(&intf->seq_lock, flags);
L
Linus Torvalds 已提交
1108 1109 1110 1111 1112

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
1113
static int intf_err_seq(struct ipmi_smi *intf,
L
Linus Torvalds 已提交
1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125
			long         msgid,
			unsigned int err)
{
	int                  rv = -ENODEV;
	unsigned long        flags;
	unsigned char        seq;
	unsigned long        seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

1126
	spin_lock_irqsave(&intf->seq_lock, flags);
1127 1128 1129 1130
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
L
Linus Torvalds 已提交
1131
	if ((intf->seq_table[seq].inuse)
1132
				&& (intf->seq_table[seq].seqid == seqid)) {
1133
		struct seq_table *ent = &intf->seq_table[seq];
L
Linus Torvalds 已提交
1134 1135

		ent->inuse = 0;
1136
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
L
Linus Torvalds 已提交
1137 1138 1139
		msg = ent->recv_msg;
		rv = 0;
	}
1140
	spin_unlock_irqrestore(&intf->seq_lock, flags);
L
Linus Torvalds 已提交
1141

1142
	if (msg)
C
Corey Minyard 已提交
1143
		deliver_err_response(intf, msg, err);
L
Linus Torvalds 已提交
1144 1145 1146 1147

	return rv;
}

1148 1149 1150 1151 1152 1153 1154 1155 1156
static void free_user_work(struct work_struct *work)
{
	struct ipmi_user *user = container_of(work, struct ipmi_user,
					      remove_work);

	cleanup_srcu_struct(&user->release_barrier);
	kfree(user);
}

L
Linus Torvalds 已提交
1157
int ipmi_create_user(unsigned int          if_num,
C
Corey Minyard 已提交
1158
		     const struct ipmi_user_hndl *handler,
L
Linus Torvalds 已提交
1159
		     void                  *handler_data,
1160
		     struct ipmi_user      **user)
L
Linus Torvalds 已提交
1161 1162
{
	unsigned long flags;
1163
	struct ipmi_user *new_user;
1164
	int           rv, index;
1165
	struct ipmi_smi *intf;
L
Linus Torvalds 已提交
1166

1167 1168 1169 1170 1171 1172 1173
	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */
L
Linus Torvalds 已提交
1174 1175 1176 1177

	if (handler == NULL)
		return -EINVAL;

1178 1179 1180 1181
	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
1182 1183 1184
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;
L
Linus Torvalds 已提交
1185 1186

	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
1187
	if (!new_user)
L
Linus Torvalds 已提交
1188 1189
		return -ENOMEM;

1190
	index = srcu_read_lock(&ipmi_interfaces_srcu);
1191 1192 1193
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
L
Linus Torvalds 已提交
1194
	}
1195
	/* Not found, return an error */
1196 1197
	rv = -EINVAL;
	goto out_kfree;
L
Linus Torvalds 已提交
1198

1199
 found:
1200 1201
	INIT_WORK(&new_user->remove_work, free_user_work);

1202 1203 1204 1205
	rv = init_srcu_struct(&new_user->release_barrier);
	if (rv)
		goto out_kfree;

1206 1207 1208 1209 1210
	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

1211 1212
	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);
L
Linus Torvalds 已提交
1213

1214
	kref_init(&new_user->refcount);
L
Linus Torvalds 已提交
1215 1216 1217
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
1218
	new_user->gets_events = false;
L
Linus Torvalds 已提交
1219

1220
	rcu_assign_pointer(new_user->self, new_user);
1221 1222 1223
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
1224
	if (handler->ipmi_watchdog_pretimeout)
1225
		/* User wants pretimeouts, so make sure to watch for them. */
1226
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1227
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1228 1229
	*user = new_user;
	return 0;
L
Linus Torvalds 已提交
1230

1231
out_kfree:
1232
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1233
	kfree(new_user);
L
Linus Torvalds 已提交
1234 1235
	return rv;
}
1236
EXPORT_SYMBOL(ipmi_create_user);
L
Linus Torvalds 已提交
1237

1238 1239
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
1240
	int rv, index;
1241
	struct ipmi_smi *intf;
1242

1243
	index = srcu_read_lock(&ipmi_interfaces_srcu);
1244 1245 1246 1247
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
1248 1249
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

1250
	/* Not found, return an error */
1251
	return -EINVAL;
1252 1253

found:
1254 1255 1256 1257 1258
	if (!intf->handlers->get_smi_info)
		rv = -ENOTTY;
	else
		rv = intf->handlers->get_smi_info(intf->send_info, data);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1259 1260 1261 1262 1263

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

1264 1265
static void free_user(struct kref *ref)
{
1266
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1267 1268 1269

	/* SRCU cleanup must happen in task context. */
	schedule_work(&user->remove_work);
1270 1271
}

1272
static void _ipmi_destroy_user(struct ipmi_user *user)
L
Linus Torvalds 已提交
1273
{
1274
	struct ipmi_smi  *intf = user->intf;
L
Linus Torvalds 已提交
1275 1276
	int              i;
	unsigned long    flags;
1277 1278
	struct cmd_rcvr  *rcvr;
	struct cmd_rcvr  *rcvrs = NULL;
L
Linus Torvalds 已提交
1279

1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295
	if (!acquire_ipmi_user(user, &i)) {
		/*
		 * The user has already been cleaned up, just make sure
		 * nothing is using it and return.
		 */
		synchronize_srcu(&user->release_barrier);
		return;
	}

	rcu_assign_pointer(user->self, NULL);
	release_ipmi_user(user, i);

	synchronize_srcu(&user->release_barrier);

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);
L
Linus Torvalds 已提交
1296

1297
	if (user->handler->ipmi_watchdog_pretimeout)
1298
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1299 1300 1301 1302

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

1303 1304 1305
	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);
L
Linus Torvalds 已提交
1306

C
Corey Minyard 已提交
1307
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1308
		if (intf->seq_table[i].inuse
1309
		    && (intf->seq_table[i].recv_msg->user == user)) {
1310
			intf->seq_table[i].inuse = 0;
1311
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1312
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
L
Linus Torvalds 已提交
1313 1314
		}
	}
1315 1316 1317 1318 1319 1320
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
1321
	 * synchronize_srcu()) then free everything in that list.
1322
	 */
1323
	mutex_lock(&intf->cmd_rcvrs_mutex);
1324
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
L
Linus Torvalds 已提交
1325
		if (rcvr->user == user) {
1326 1327 1328
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
L
Linus Torvalds 已提交
1329 1330
		}
	}
1331
	mutex_unlock(&intf->cmd_rcvrs_mutex);
1332 1333 1334 1335 1336 1337
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}
L
Linus Torvalds 已提交
1338

1339
	kref_put(&intf->refcount, intf_free);
1340
	module_put(intf->owner);
1341 1342 1343 1344 1345
}

int ipmi_destroy_user(struct ipmi_user *user)
{
	_ipmi_destroy_user(user);
L
Linus Torvalds 已提交
1346

1347
	kref_put(&user->refcount, free_user);
L
Linus Torvalds 已提交
1348

1349
	return 0;
L
Linus Torvalds 已提交
1350
}
1351
EXPORT_SYMBOL(ipmi_destroy_user);
L
Linus Torvalds 已提交
1352

1353
int ipmi_get_version(struct ipmi_user *user,
1354 1355
		     unsigned char *major,
		     unsigned char *minor)
L
Linus Torvalds 已提交
1356
{
1357
	struct ipmi_device_id id;
1358
	int rv, index;
1359

1360 1361 1362
	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;
1363

1364 1365 1366 1367 1368 1369
	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user, index);
1370

1371
	return rv;
L
Linus Torvalds 已提交
1372
}
1373
EXPORT_SYMBOL(ipmi_get_version);
L
Linus Torvalds 已提交
1374

1375
int ipmi_set_my_address(struct ipmi_user *user,
1376 1377
			unsigned int  channel,
			unsigned char address)
L
Linus Torvalds 已提交
1378
{
1379
	int index, rv = 0;
1380 1381 1382 1383 1384

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

1385
	if (channel >= IPMI_MAX_CHANNELS) {
1386
		rv = -EINVAL;
1387 1388
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1389
		user->intf->addrinfo[channel].address = address;
1390
	}
1391 1392
	release_ipmi_user(user, index);

1393
	return rv;
L
Linus Torvalds 已提交
1394
}
1395
EXPORT_SYMBOL(ipmi_set_my_address);
L
Linus Torvalds 已提交
1396

1397
int ipmi_get_my_address(struct ipmi_user *user,
1398 1399
			unsigned int  channel,
			unsigned char *address)
L
Linus Torvalds 已提交
1400
{
1401
	int index, rv = 0;
1402 1403 1404 1405 1406

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

1407
	if (channel >= IPMI_MAX_CHANNELS) {
1408
		rv = -EINVAL;
1409 1410
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1411
		*address = user->intf->addrinfo[channel].address;
1412
	}
1413 1414
	release_ipmi_user(user, index);

1415
	return rv;
L
Linus Torvalds 已提交
1416
}
1417
EXPORT_SYMBOL(ipmi_get_my_address);
L
Linus Torvalds 已提交
1418

1419
int ipmi_set_my_LUN(struct ipmi_user *user,
1420 1421
		    unsigned int  channel,
		    unsigned char LUN)
L
Linus Torvalds 已提交
1422
{
1423
	int index, rv = 0;
1424 1425 1426 1427 1428

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

1429
	if (channel >= IPMI_MAX_CHANNELS) {
1430
		rv = -EINVAL;
1431 1432
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1433
		user->intf->addrinfo[channel].lun = LUN & 0x3;
1434
	}
1435 1436
	release_ipmi_user(user, index);

1437
	return rv;
L
Linus Torvalds 已提交
1438
}
1439
EXPORT_SYMBOL(ipmi_set_my_LUN);
L
Linus Torvalds 已提交
1440

1441
int ipmi_get_my_LUN(struct ipmi_user *user,
1442 1443
		    unsigned int  channel,
		    unsigned char *address)
L
Linus Torvalds 已提交
1444
{
1445
	int index, rv = 0;
1446 1447 1448 1449 1450

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

1451
	if (channel >= IPMI_MAX_CHANNELS) {
1452
		rv = -EINVAL;
1453 1454
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1455
		*address = user->intf->addrinfo[channel].lun;
1456
	}
1457 1458
	release_ipmi_user(user, index);

1459
	return rv;
L
Linus Torvalds 已提交
1460
}
1461
EXPORT_SYMBOL(ipmi_get_my_LUN);
L
Linus Torvalds 已提交
1462

1463
int ipmi_get_maintenance_mode(struct ipmi_user *user)
C
Corey Minyard 已提交
1464
{
1465
	int mode, index;
C
Corey Minyard 已提交
1466 1467
	unsigned long flags;

1468 1469 1470 1471
	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

C
Corey Minyard 已提交
1472 1473 1474
	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1475
	release_ipmi_user(user, index);
C
Corey Minyard 已提交
1476 1477 1478 1479 1480

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

1481
static void maintenance_mode_update(struct ipmi_smi *intf)
C
Corey Minyard 已提交
1482 1483 1484 1485 1486 1487
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

1488
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
C
Corey Minyard 已提交
1489
{
1490
	int rv = 0, index;
C
Corey Minyard 已提交
1491
	unsigned long flags;
1492
	struct ipmi_smi *intf = user->intf;
C
Corey Minyard 已提交
1493

1494 1495 1496 1497
	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

C
Corey Minyard 已提交
1498 1499 1500 1501 1502 1503 1504 1505 1506
	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
C
Corey Minyard 已提交
1507
			intf->maintenance_mode_enable = false;
C
Corey Minyard 已提交
1508 1509 1510
			break;

		case IPMI_MAINTENANCE_MODE_ON:
C
Corey Minyard 已提交
1511
			intf->maintenance_mode_enable = true;
C
Corey Minyard 已提交
1512 1513 1514 1515 1516 1517
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
C
Corey Minyard 已提交
1518
		intf->maintenance_mode = mode;
C
Corey Minyard 已提交
1519 1520 1521 1522 1523

		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1524
	release_ipmi_user(user, index);
C
Corey Minyard 已提交
1525 1526 1527 1528 1529

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);

1530
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
L
Linus Torvalds 已提交
1531
{
1532
	unsigned long        flags;
1533
	struct ipmi_smi      *intf = user->intf;
1534 1535
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head     msgs;
1536 1537 1538 1539 1540
	int index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;
L
Linus Torvalds 已提交
1541

1542 1543 1544
	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
1545 1546 1547
	if (user->gets_events == val)
		goto out;

L
Linus Torvalds 已提交
1548 1549
	user->gets_events = val;

1550 1551 1552 1553 1554 1555 1556
	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

1557 1558 1559 1560 1561 1562 1563 1564 1565
	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
A
Akinobu Mita 已提交
1566 1567
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
1568
		intf->waiting_events_count = 0;
1569
		if (intf->event_msg_printed) {
1570
			dev_warn(intf->si_dev, "Event queue no longer full\n");
1571 1572
			intf->event_msg_printed = 0;
		}
1573

1574 1575 1576 1577 1578 1579
		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
C
Corey Minyard 已提交
1580
			deliver_local_response(intf, msg);
1581 1582 1583 1584
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
1585 1586
	}

1587
 out:
1588
	spin_unlock_irqrestore(&intf->events_lock, flags);
1589
	release_ipmi_user(user, index);
L
Linus Torvalds 已提交
1590 1591 1592

	return 0;
}
1593
EXPORT_SYMBOL(ipmi_set_gets_events);
L
Linus Torvalds 已提交
1594

1595
static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1596
				      unsigned char netfn,
1597 1598
				      unsigned char cmd,
				      unsigned char chan)
1599 1600 1601 1602
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1603 1604
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & (1 << chan)))
1605 1606 1607 1608 1609
			return rcvr;
	}
	return NULL;
}

1610
static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int  chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & chans))
			return 0;
	}
	return 1;
}

1625
int ipmi_register_for_cmd(struct ipmi_user *user,
L
Linus Torvalds 已提交
1626
			  unsigned char netfn,
1627 1628
			  unsigned char cmd,
			  unsigned int  chans)
L
Linus Torvalds 已提交
1629
{
1630
	struct ipmi_smi *intf = user->intf;
1631
	struct cmd_rcvr *rcvr;
1632
	int rv = 0, index;
L
Linus Torvalds 已提交
1633

1634 1635 1636
	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;
L
Linus Torvalds 已提交
1637 1638

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1639 1640 1641 1642
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
1643 1644
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
1645
	rcvr->chans = chans;
1646
	rcvr->user = user;
L
Linus Torvalds 已提交
1647

1648
	mutex_lock(&intf->cmd_rcvrs_mutex);
L
Linus Torvalds 已提交
1649
	/* Make sure the command/netfn is not already registered. */
1650
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1651 1652
		rv = -EBUSY;
		goto out_unlock;
L
Linus Torvalds 已提交
1653
	}
1654

1655
	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1656

1657
	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
L
Linus Torvalds 已提交
1658

1659
out_unlock:
1660
	mutex_unlock(&intf->cmd_rcvrs_mutex);
L
Linus Torvalds 已提交
1661 1662
	if (rv)
		kfree(rcvr);
1663
out_release:
1664
	release_ipmi_user(user, index);
L
Linus Torvalds 已提交
1665 1666 1667

	return rv;
}
1668
EXPORT_SYMBOL(ipmi_register_for_cmd);
L
Linus Torvalds 已提交
1669

1670
int ipmi_unregister_for_cmd(struct ipmi_user *user,
L
Linus Torvalds 已提交
1671
			    unsigned char netfn,
1672 1673
			    unsigned char cmd,
			    unsigned int  chans)
L
Linus Torvalds 已提交
1674
{
1675
	struct ipmi_smi *intf = user->intf;
1676
	struct cmd_rcvr *rcvr;
1677
	struct cmd_rcvr *rcvrs = NULL;
1678 1679 1680 1681 1682
	int i, rv = -ENOENT, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;
L
Linus Torvalds 已提交
1683

1684
	mutex_lock(&intf->cmd_rcvrs_mutex);
1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
1703
	release_ipmi_user(user, index);
1704
	while (rcvrs) {
1705
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1706 1707
		rcvr = rcvrs;
		rcvrs = rcvr->next;
1708
		kfree(rcvr);
L
Linus Torvalds 已提交
1709
	}
1710

1711
	return rv;
L
Linus Torvalds 已提交
1712
}
1713
EXPORT_SYMBOL(ipmi_unregister_for_cmd);
L
Linus Torvalds 已提交
1714 1715 1716 1717 1718

static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;
1719

L
Linus Torvalds 已提交
1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744
	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}

static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long                  msgid,
				   unsigned char         ipmb_seq,
				   int                   broadcast,
				   unsigned char         source_address,
				   unsigned char         source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1745
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
L
Linus Torvalds 已提交
1746 1747 1748 1749 1750 1751
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
1752
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
L
Linus Torvalds 已提交
1753 1754 1755 1756
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
1757
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
L
Linus Torvalds 已提交
1758

1759 1760 1761 1762
	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
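
/*
 * Buffer layout produced above (editor's sketch; i is 1 only for a
 * broadcast, 0 otherwise):
 *
 *   data[0]    App request netfn << 2
 *   data[1]    Send Message command
 *   data[2]    channel
 *   data[3]    0 (broadcast prefix, only when broadcasting)
 *   data[i+3]  target slave address
 *   data[i+4]  netfn << 2 | LUN
 *   data[i+5]  checksum over the previous two bytes
 *   data[i+6]  source address
 *   data[i+7]  ipmb_seq << 2 | source LUN
 *   data[i+8]  command, followed by the payload and a final checksum
 */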

static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr  *lan_addr,
				  long                  msgid,
				  unsigned char         ipmb_seq,
				  unsigned char         source_lun)
{
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/*
	 * Add on the checksum size (there is no broadcast offset
	 * for LAN messages).
	 */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}
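
/*
 * Editor's note: the LAN variant is the same idea without the
 * broadcast prefix: a fixed 10-byte header (session handle and SWIDs
 * instead of IPMB addresses), the payload, then one trailing checksum
 * over bytes 7 onward.
 */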

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}
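
/*
 * Editor's note on smi_send(): if a message is already in flight,
 * smi_add_send_msg() only queues the new one (hp_xmit_msgs for
 * priority > 0, xmit_msgs otherwise) and returns NULL, so
 * handlers->sender() runs only when this message became the current
 * one; queued messages are sent later, once the in-flight message
 * completes.
 */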

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}
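
/*
 * Editor's note: cold/warm reset and any firmware-netfn request are
 * treated as maintenance-mode commands; the request paths below use
 * this to arm the maintenance-mode timeout and, for IPMB, to switch
 * to the longer maintenance retry interval.
 */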

static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
			      struct ipmi_addr       *addr,
			      long                   msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg    *smi_msg,
			      struct ipmi_recv_msg   *recv_msg,
			      int                    retries,
			      unsigned int           retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}
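
/*
 * Editor's sketch of the message built above for the system
 * interface: only a two byte header, data[0] = netfn << 2 | LUN and
 * data[1] = cmd, followed by the payload; no sequence number is
 * needed because the reply comes straight back from the local BMC.
 */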

static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
			   struct ipmi_addr       *addr,
			   long                   msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg    *smi_msg,
			   struct ipmi_recv_msg   *recv_msg,
			   unsigned char          source_address,
			   unsigned char          source_lun,
			   int                    retries,
			   unsigned int           retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
		/*
		 * Broadcasts add a zero at the beginning of the
		 * message, but are otherwise the same as an IPMB
		 * address.
		 */
		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		broadcast = 1;
		retries = 0; /* Don't retry broadcasts. */
	}

	/*
	 * 9 for the header and 1 for the checksum, plus
	 * possibly one for the broadcast.
	 */
	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
	if (ipmb_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_ipmb_responses);
		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
				msgid, broadcast,
				source_address, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		if (is_maintenance_mode_cmd(msg))
			intf->ipmb_maintenance_mode_timeout =
				maintenance_mode_timeout_ms;

		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
			/* Different default in maintenance mode */
			retry_time_ms = default_maintenance_retry_ms;

		/*
		 * Create a sequence number with a 1 second
		 * timeout and 4 retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   broadcast,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_ipmb_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_ipmb_msg(smi_msg, msg, ipmb_addr,
				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				ipmb_seq, broadcast,
				source_address, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}
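
/*
 * Editor's note: IPMB commands and responses are matched differently.
 * A command gets a sequence number from intf_next_seq() (encoded into
 * the msgid with STORE_SEQ_IN_MSGID) so the reply and any
 * retransmissions can be matched in the seq_table; a response simply
 * reuses the caller's msgid as the IPMB sequence.
 */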

static int i_ipmi_req_lan(struct ipmi_smi        *intf,
			  struct ipmi_addr       *addr,
			  long                   msgid,
			  struct kernel_ipmi_msg *msg,
			  struct ipmi_smi_msg    *smi_msg,
			  struct ipmi_recv_msg   *recv_msg,
			  unsigned char          source_lun,
			  int                    retries,
			  unsigned int           retry_time_ms)
{
	struct ipmi_lan_addr  *lan_addr;
	unsigned char ipmb_seq;
	long seqid;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if ((chans[addr->channel].medium
				!= IPMI_CHANNEL_MEDIUM_8023LAN)
			&& (chans[addr->channel].medium
			    != IPMI_CHANNEL_MEDIUM_ASYNC)) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	/* 11 for the header and 1 for the checksum. */
	if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	lan_addr = (struct ipmi_lan_addr *) addr;
	if (lan_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_lan_responses);
		format_lan_msg(smi_msg, msg, lan_addr, msgid,
			       msgid, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		/*
		 * Create a sequence number with a 1 second
		 * timeout and 4 retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   0,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_lan_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_lan_msg(smi_msg, msg, lan_addr,
			       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
			       ipmb_seq, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}

/*
 * Separate from ipmi_request so that the user does not have to be
 * supplied in certain circumstances (mainly at panic time).  If
 * messages are supplied, they will be freed, even if an error
 * occurs.
 */
static int i_ipmi_request(struct ipmi_user     *user,
			  struct ipmi_smi      *intf,
			  struct ipmi_addr     *addr,
			  long                 msgid,
			  struct kernel_ipmi_msg *msg,
			  void                 *user_msg_data,
			  void                 *supplied_smi,
			  struct ipmi_recv_msg *supplied_recv,
			  int                  priority,
			  unsigned char        source_address,
			  unsigned char        source_lun,
			  int                  retries,
			  unsigned int         retry_time_ms)
{
	struct ipmi_smi_msg *smi_msg;
	struct ipmi_recv_msg *recv_msg;
	int rv = 0;

	if (supplied_recv)
		recv_msg = supplied_recv;
	else {
		recv_msg = ipmi_alloc_recv_msg();
		if (recv_msg == NULL) {
			rv = -ENOMEM;
			goto out;
		}
	}
	recv_msg->user_msg_data = user_msg_data;

	if (supplied_smi)
		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
	else {
		smi_msg = ipmi_alloc_smi_msg();
		if (smi_msg == NULL) {
			if (!supplied_recv)
				ipmi_free_recv_msg(recv_msg);
			rv = -ENOMEM;
			goto out;
		}
	}

	rcu_read_lock();
	if (intf->in_shutdown) {
		rv = -ENODEV;
		goto out_err;
	}

	recv_msg->user = user;
	if (user)
		/* The put happens when the message is freed. */
		kref_get(&user->refcount);
	recv_msg->msgid = msgid;
	/*
	 * Store the message to send in the receive message so timeout
	 * responses can get the proper response data.
	 */
	recv_msg->msg = *msg;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
					recv_msg, retries, retry_time_ms);
	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
				     source_address, source_lun,
				     retries, retry_time_ms);
	} else if (is_lan_addr(addr)) {
		rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
				    source_lun, retries, retry_time_ms);
	} else {
		/* Unknown address type. */
		ipmi_inc_stat(intf, sent_invalid_commands);
		rv = -EINVAL;
	}

	if (rv) {
out_err:
		ipmi_free_smi_msg(smi_msg);
		ipmi_free_recv_msg(recv_msg);
	} else {
		pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data);

		smi_send(intf, intf->handlers, smi_msg, priority);
	}
	rcu_read_unlock();

out:
	return rv;
}

static int check_addr(struct ipmi_smi  *intf,
		      struct ipmi_addr *addr,
		      unsigned char    *saddr,
		      unsigned char    *lun)
{
	if (addr->channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
	*lun = intf->addrinfo[addr->channel].lun;
	*saddr = intf->addrinfo[addr->channel].address;
	return 0;
}
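
/*
 * Editor's note: the array_index_nospec() above clamps the channel
 * index under speculative execution after the bounds check (Spectre
 * v1 hardening) before it is used to index addrinfo[].
 */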

int ipmi_request_settime(struct ipmi_user *user,
			 struct ipmi_addr *addr,
			 long             msgid,
			 struct kernel_ipmi_msg  *msg,
			 void             *user_msg_data,
			 int              priority,
			 int              retries,
			 unsigned int     retry_time_ms)
{
	unsigned char saddr = 0, lun = 0;
	int rv, index;

	if (!user)
		return -EINVAL;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (!rv)
		rv = i_ipmi_request(user,
				    user->intf,
				    addr,
				    msgid,
				    msg,
				    user_msg_data,
				    NULL, NULL,
				    priority,
				    saddr,
				    lun,
				    retries,
				    retry_time_ms);

	release_ipmi_user(user, index);
	return rv;
}
EXPORT_SYMBOL(ipmi_request_settime);

int ipmi_request_supply_msgs(struct ipmi_user     *user,
			     struct ipmi_addr     *addr,
			     long                 msgid,
			     struct kernel_ipmi_msg *msg,
			     void                 *user_msg_data,
			     void                 *supplied_smi,
			     struct ipmi_recv_msg *supplied_recv,
			     int                  priority)
{
	unsigned char saddr = 0, lun = 0;
	int rv, index;

	if (!user)
		return -EINVAL;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (!rv)
		rv = i_ipmi_request(user,
				    user->intf,
				    addr,
				    msgid,
				    msg,
				    user_msg_data,
				    supplied_smi,
				    supplied_recv,
				    priority,
				    saddr,
				    lun,
				    -1, 0);

	release_ipmi_user(user, index);
	return rv;
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);

static void bmc_device_id_handler(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg)
{
	int rv;

	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
			|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
			|| (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
		dev_warn(intf->si_dev,
			 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
			 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
		return;
	}

	rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
			msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
	if (rv) {
		dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
		intf->bmc->dyn_id_set = 0;
	} else {
		/*
		 * Make sure the id data is available before setting
		 * dyn_id_set.
		 */
		smp_wmb();
		intf->bmc->dyn_id_set = 1;
	}

	wake_up(&intf->waitq);
}

static int
send_get_device_id_cmd(struct ipmi_smi *intf)
{
	struct ipmi_system_interface_addr si;
	struct kernel_ipmi_msg msg;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;

	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}

static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
{
	int rv;

	bmc->dyn_id_set = 2;

	intf->null_user_handler = bmc_device_id_handler;

	rv = send_get_device_id_cmd(intf);
	if (rv)
		return rv;

	wait_event(intf->waitq, bmc->dyn_id_set != 2);

	if (!bmc->dyn_id_set)
		rv = -EIO; /* Something went wrong in the fetch. */

	/* dyn_id_set makes the id data available. */
	smp_rmb();

	intf->null_user_handler = NULL;

	return rv;
}

/*
 * Fetch the device id for the bmc/interface.  You must pass in either
 * bmc or intf, this code will get the other one.  If the data has
 * been recently fetched, this will just use the cached data.  Otherwise
 * it will run a new fetch.
 *
 * Except for the first time this is called (in ipmi_add_smi()),
 * this will always return good data.
 */
static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			       struct ipmi_device_id *id,
			       bool *guid_set, guid_t *guid, int intf_num)
{
	int rv = 0;
	int prev_dyn_id_set, prev_guid_set;
	bool intf_set = intf != NULL;

	if (!intf) {
		mutex_lock(&bmc->dyn_mutex);
retry_bmc_lock:
		if (list_empty(&bmc->intfs)) {
			mutex_unlock(&bmc->dyn_mutex);
			return -ENOENT;
		}
		intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
					bmc_link);
		kref_get(&intf->refcount);
		mutex_unlock(&bmc->dyn_mutex);
		mutex_lock(&intf->bmc_reg_mutex);
		mutex_lock(&bmc->dyn_mutex);
		if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
					     bmc_link)) {
			mutex_unlock(&intf->bmc_reg_mutex);
			kref_put(&intf->refcount, intf_free);
			goto retry_bmc_lock;
		}
	} else {
		mutex_lock(&intf->bmc_reg_mutex);
		bmc = intf->bmc;
		mutex_lock(&bmc->dyn_mutex);
		kref_get(&intf->refcount);
	}

	/* If we have a valid and current ID, just return that. */
	if (intf->in_bmc_register ||
	    (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
		goto out_noprocessing;

	prev_guid_set = bmc->dyn_guid_set;
	__get_guid(intf);

	prev_dyn_id_set = bmc->dyn_id_set;
	rv = __get_device_id(intf, bmc);
	if (rv)
		goto out;

	/*
	 * The guid, device id, manufacturer id, and product id should
	 * not change on a BMC.  If it does we have to do some dancing.
	 */
	if (!intf->bmc_registered
	    || (!prev_guid_set && bmc->dyn_guid_set)
	    || (!prev_dyn_id_set && bmc->dyn_id_set)
	    || (prev_guid_set && bmc->dyn_guid_set
		&& !guid_equal(&bmc->guid, &bmc->fetch_guid))
	    || bmc->id.device_id != bmc->fetch_id.device_id
	    || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
	    || bmc->id.product_id != bmc->fetch_id.product_id) {
		struct ipmi_device_id id = bmc->fetch_id;
		int guid_set = bmc->dyn_guid_set;
		guid_t guid;

		guid = bmc->fetch_guid;
		mutex_unlock(&bmc->dyn_mutex);

		__ipmi_bmc_unregister(intf);
		/* Fill in the temporary BMC for good measure. */
		intf->bmc->id = id;
		intf->bmc->dyn_guid_set = guid_set;
		intf->bmc->guid = guid;
		if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
			need_waiter(intf); /* Retry later on an error. */
		else
			__scan_channels(intf, &id);


		if (!intf_set) {
			/*
			 * We weren't given the interface on the
			 * command line, so restart the operation on
			 * the next interface for the BMC.
			 */
			mutex_unlock(&intf->bmc_reg_mutex);
			mutex_lock(&bmc->dyn_mutex);
			goto retry_bmc_lock;
		}

		/* We have a new BMC, set it up. */
		bmc = intf->bmc;
		mutex_lock(&bmc->dyn_mutex);
		goto out_noprocessing;
	} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
		/* Version info changes, scan the channels again. */
		__scan_channels(intf, &bmc->fetch_id);

	bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;

out:
	if (rv && prev_dyn_id_set) {
		rv = 0; /* Ignore failures if we have previous data. */
		bmc->dyn_id_set = prev_dyn_id_set;
	}
	if (!rv) {
		bmc->id = bmc->fetch_id;
		if (bmc->dyn_guid_set)
			bmc->guid = bmc->fetch_guid;
		else if (prev_guid_set)
			/*
			 * The guid used to be valid and it failed to fetch,
			 * just use the cached value.
			 */
			bmc->dyn_guid_set = prev_guid_set;
	}
out_noprocessing:
	if (!rv) {
		if (id)
			*id = bmc->id;

		if (guid_set)
			*guid_set = bmc->dyn_guid_set;

		if (guid && bmc->dyn_guid_set)
			*guid =  bmc->guid;
	}

	mutex_unlock(&bmc->dyn_mutex);
	mutex_unlock(&intf->bmc_reg_mutex);

	kref_put(&intf->refcount, intf_free);
	return rv;
}

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid)
{
	return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
}

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 10, "%u\n", id.device_id);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t provides_device_sdrs_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
}
static DEVICE_ATTR_RO(provides_device_sdrs);

static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
}
static DEVICE_ATTR_RO(revision);

static ssize_t firmware_revision_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
			id.firmware_revision_2);
}
static DEVICE_ATTR_RO(firmware_revision);

static ssize_t ipmi_version_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 20, "%u.%u\n",
			ipmi_version_major(&id),
			ipmi_version_minor(&id));
}
static DEVICE_ATTR_RO(ipmi_version);

static ssize_t add_dev_support_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
}
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
		   NULL);

static ssize_t manufacturer_id_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
}
static DEVICE_ATTR_RO(manufacturer_id);

static ssize_t product_id_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
}
static DEVICE_ATTR_RO(product_id);

static ssize_t aux_firmware_rev_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
			id.aux_firmware_revision[3],
			id.aux_firmware_revision[2],
			id.aux_firmware_revision[1],
			id.aux_firmware_revision[0]);
}
static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);

static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	bool guid_set;
	guid_t guid;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
	if (rv)
		return rv;
	if (!guid_set)
		return -ENOENT;

	return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
}
static DEVICE_ATTR_RO(guid);

static struct attribute *bmc_dev_attrs[] = {
	&dev_attr_device_id.attr,
	&dev_attr_provides_device_sdrs.attr,
	&dev_attr_revision.attr,
	&dev_attr_firmware_revision.attr,
	&dev_attr_ipmi_version.attr,
	&dev_attr_additional_device_support.attr,
	&dev_attr_manufacturer_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_aux_firmware_revision.attr,
	&dev_attr_guid.attr,
	NULL
};

static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct bmc_device *bmc = to_bmc_device(dev);
	umode_t mode = attr->mode;
	int rv;

	if (attr == &dev_attr_aux_firmware_revision.attr) {
		struct ipmi_device_id id;

		rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
		return (!rv && id.aux_firmware_revision_set) ? mode : 0;
	}
	if (attr == &dev_attr_guid.attr) {
		bool guid_set;

		rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
		return (!rv && guid_set) ? mode : 0;
	}
	return mode;
}

static const struct attribute_group bmc_dev_attr_group = {
	.attrs		= bmc_dev_attrs,
	.is_visible	= bmc_dev_attr_is_visible,
};

static const struct attribute_group *bmc_dev_attr_groups[] = {
	&bmc_dev_attr_group,
	NULL
};

static const struct device_type bmc_device_type = {
	.groups		= bmc_dev_attr_groups,
};

static int __find_bmc_guid(struct device *dev, const void *data)
{
	const guid_t *guid = data;
	struct bmc_device *bmc;
	int rv;

	if (dev->type != &bmc_device_type)
		return 0;

	bmc = to_bmc_device(dev);
	rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
	if (rv)
		rv = kref_get_unless_zero(&bmc->usecount);
	return rv;
}

/*
 * Returns with the bmc's usecount incremented, if it is non-NULL.
 */
static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
					     guid_t *guid)
{
	struct device *dev;
	struct bmc_device *bmc = NULL;

	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
	if (dev) {
		bmc = to_bmc_device(dev);
		put_device(dev);
	}
	return bmc;
}

struct prod_dev_id {
	unsigned int  product_id;
	unsigned char device_id;
};

static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
{
	const struct prod_dev_id *cid = data;
	struct bmc_device *bmc;
	int rv;

	if (dev->type != &bmc_device_type)
		return 0;

	bmc = to_bmc_device(dev);
	rv = (bmc->id.product_id == cid->product_id
	      && bmc->id.device_id == cid->device_id);
	if (rv)
		rv = kref_get_unless_zero(&bmc->usecount);
	return rv;
}

/*
 * Returns with the bmc's usecount incremented, if it is non-NULL.
 */
static struct bmc_device *ipmi_find_bmc_prod_dev_id(
	struct device_driver *drv,
	unsigned int product_id, unsigned char device_id)
{
	struct prod_dev_id id = {
		.product_id = product_id,
		.device_id = device_id,
	};
	struct device *dev;
	struct bmc_device *bmc = NULL;

	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
	if (dev) {
		bmc = to_bmc_device(dev);
		put_device(dev);
	}
	return bmc;
}

static DEFINE_IDA(ipmi_bmc_ida);

static void
release_bmc_device(struct device *dev)
{
	kfree(to_bmc_device(dev));
}

static void cleanup_bmc_work(struct work_struct *work)
{
	struct bmc_device *bmc = container_of(work, struct bmc_device,
					      remove_work);
	int id = bmc->pdev.id; /* Unregister overwrites id */

	platform_device_unregister(&bmc->pdev);
	ida_simple_remove(&ipmi_bmc_ida, id);
}

static void
cleanup_bmc_device(struct kref *ref)
{
	struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);

	/*
	 * Remove the platform device in a work queue to avoid issues
	 * with removing the device attributes while reading a device
	 * attribute.
	 */
	schedule_work(&bmc->remove_work);
}

/*
 * Must be called with intf->bmc_reg_mutex held.
 */
static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	struct bmc_device *bmc = intf->bmc;

	if (!intf->bmc_registered)
		return;

	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
	sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
	kfree(intf->my_dev_name);
	intf->my_dev_name = NULL;

	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	kref_put(&bmc->usecount, cleanup_bmc_device);
	intf->bmc_registered = false;
}

static void ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	mutex_lock(&intf->bmc_reg_mutex);
	__ipmi_bmc_unregister(intf);
	mutex_unlock(&intf->bmc_reg_mutex);
}

/*
 * Must be called with intf->bmc_reg_mutex held.
 */
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num)
{
	int               rv;
	struct bmc_device *bmc;
	struct bmc_device *old_bmc;

	/*
	 * platform_device_register() can cause bmc_reg_mutex to
	 * be claimed because of the is_visible functions of
	 * the attributes.  Eliminate possible recursion and
	 * release the lock.
	 */
	intf->in_bmc_register = true;
	mutex_unlock(&intf->bmc_reg_mutex);

	/*
	 * Try to find if there is a bmc_device struct
	 * representing the interfaced BMC already
	 */
	mutex_lock(&ipmidriver_mutex);
	if (guid_set)
		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
	else
		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
						    id->product_id,
						    id->device_id);

	/*
	 * If there is already a bmc_device, free the new one,
	 * otherwise register the new BMC device
	 */
	if (old_bmc) {
		bmc = old_bmc;
		/*
		 * Note: old_bmc already has usecount incremented by
		 * the BMC find functions.
		 */
		intf->bmc = old_bmc;
		mutex_lock(&bmc->dyn_mutex);
		list_add_tail(&intf->bmc_link, &bmc->intfs);
		mutex_unlock(&bmc->dyn_mutex);

		dev_info(intf->si_dev,
			 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
			 bmc->id.manufacturer_id,
			 bmc->id.product_id,
			 bmc->id.device_id);
	} else {
		bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
		if (!bmc) {
			rv = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(&bmc->intfs);
		mutex_init(&bmc->dyn_mutex);
		INIT_WORK(&bmc->remove_work, cleanup_bmc_work);

		bmc->id = *id;
		bmc->dyn_id_set = 1;
		bmc->dyn_guid_set = guid_set;
		bmc->guid = *guid;
		bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;

		bmc->pdev.name = "ipmi_bmc";

		rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
		if (rv < 0) {
			kfree(bmc);
			goto out;
		}

		bmc->pdev.dev.driver = &ipmidriver.driver;
		bmc->pdev.id = rv;
		bmc->pdev.dev.release = release_bmc_device;
		bmc->pdev.dev.type = &bmc_device_type;
		kref_init(&bmc->usecount);

		intf->bmc = bmc;
		mutex_lock(&bmc->dyn_mutex);
		list_add_tail(&intf->bmc_link, &bmc->intfs);
		mutex_unlock(&bmc->dyn_mutex);

		rv = platform_device_register(&bmc->pdev);
		if (rv) {
			dev_err(intf->si_dev,
				"Unable to register bmc device: %d\n",
				rv);
			goto out_list_del;
		}

		dev_info(intf->si_dev,
			 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
			 bmc->id.manufacturer_id,
			 bmc->id.product_id,
			 bmc->id.device_id);
	}

	/*
	 * create symlink from system interface device to bmc device
	 * and back.
	 */
	rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
	if (rv) {
		dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
		goto out_put_bmc;
	}

	if (intf_num == -1)
		intf_num = intf->intf_num;
	intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
	if (!intf->my_dev_name) {
		rv = -ENOMEM;
		dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
			rv);
		goto out_unlink1;
	}

	rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
			       intf->my_dev_name);
	if (rv) {
		kfree(intf->my_dev_name);
		intf->my_dev_name = NULL;
		dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
			rv);
		goto out_free_my_dev_name;
	}

	intf->bmc_registered = true;

out:
	mutex_unlock(&ipmidriver_mutex);
	mutex_lock(&intf->bmc_reg_mutex);
	intf->in_bmc_register = false;
	return rv;


out_free_my_dev_name:
	kfree(intf->my_dev_name);
	intf->my_dev_name = NULL;

out_unlink1:
	sysfs_remove_link(&intf->si_dev->kobj, "bmc");

out_put_bmc:
	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	kref_put(&bmc->usecount, cleanup_bmc_device);
	goto out;

out_list_del:
3109
	mutex_lock(&bmc->dyn_mutex);
3110
	list_del(&intf->bmc_link);
3111
	mutex_unlock(&bmc->dyn_mutex);
3112
	intf->bmc = &intf->tmp_bmc;
C
Corey Minyard 已提交
3113 3114
	put_device(&bmc->pdev.dev);
	goto out;
3115 3116 3117
}

static int
send_guid_cmd(struct ipmi_smi *intf, int chan)
{
	struct kernel_ipmi_msg            msg;
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}

static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	struct bmc_device *bmc = intf->bmc;

	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
		/* Not for me */
		return;

	if (msg->msg.data[0] != 0) {
		/* Error from getting the GUID, the BMC doesn't have one. */
		bmc->dyn_guid_set = 0;
		goto out;
	}

	if (msg->msg.data_len < UUID_SIZE + 1) {
		bmc->dyn_guid_set = 0;
		dev_warn(intf->si_dev,
			 "The GUID response from the BMC was too short, it was %d but should have been %d.  Assuming GUID is not available.\n",
			 msg->msg.data_len, UUID_SIZE + 1);
		goto out;
	}

	guid_copy(&bmc->fetch_guid, (guid_t *)(msg->msg.data + 1));
	/*
	 * Make sure the guid data is available before setting
	 * dyn_guid_set.
	 */
	smp_wmb();
	bmc->dyn_guid_set = 1;
 out:
	wake_up(&intf->waitq);
}

static void __get_guid(struct ipmi_smi *intf)
{
	int rv;
	struct bmc_device *bmc = intf->bmc;

	bmc->dyn_guid_set = 2;
	intf->null_user_handler = guid_handler;
	rv = send_guid_cmd(intf, 0);
	if (rv)
		/* Send failed, no GUID available. */
		bmc->dyn_guid_set = 0;

	wait_event(intf->waitq, bmc->dyn_guid_set != 2);

	/* dyn_guid_set makes the guid data available. */
	smp_rmb();

	intf->null_user_handler = NULL;
}

static int
send_channel_info_cmd(struct ipmi_smi *intf, int chan)
{
	struct kernel_ipmi_msg            msg;
	unsigned char                     data[1];
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
	msg.data = data;
	msg.data_len = 1;
	data[0] = chan;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}

static void
channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;
	int ch;
	unsigned int set = intf->curr_working_cset;
	struct ipmi_channel *chans;

	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
		/* It's the one we want */
		if (msg->msg.data[0] != 0) {
			/* Got an error from the channel, just go on. */

			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
				/*
				 * If the MC does not support this
				 * command, that is legal.  We just
				 * assume it has one IPMB at channel
				 * zero.
				 */
				intf->wchannels[set].c[0].medium
					= IPMI_CHANNEL_MEDIUM_IPMB;
				intf->wchannels[set].c[0].protocol
					= IPMI_CHANNEL_PROTOCOL_IPMB;

				intf->channel_list = intf->wchannels + set;
				intf->channels_ready = true;
				wake_up(&intf->waitq);
				goto out;
			}
			goto next_channel;
		}
		if (msg->msg.data_len < 4) {
			/* Message not big enough, just go on. */
			goto next_channel;
		}
		ch = intf->curr_channel;
		chans = intf->wchannels[set].c;
		chans[ch].medium = msg->msg.data[2] & 0x7f;
		chans[ch].protocol = msg->msg.data[3] & 0x1f;

 next_channel:
		intf->curr_channel++;
		if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			wake_up(&intf->waitq);
		} else {
			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			rv = send_channel_info_cmd(intf, intf->curr_channel);
		}

		if (rv) {
			/* Got an error somehow, just give up. */
			dev_warn(intf->si_dev,
				 "Error sending channel information for channel %d: %d\n",
				 intf->curr_channel, rv);

			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			wake_up(&intf->waitq);
		}
	}
 out:
	return;
}

/*
 * Must be holding intf->bmc_reg_mutex to call this.
 */
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
{
	int rv;

	if (ipmi_version_major(id) > 1
			|| (ipmi_version_major(id) == 1
			    && ipmi_version_minor(id) >= 5)) {
		unsigned int set;

		/*
		 * Start scanning the channels to see what is
		 * available.
		 */
		set = !intf->curr_working_cset;
		intf->curr_working_cset = set;
		memset(&intf->wchannels[set], 0,
		       sizeof(struct ipmi_channel_set));

		intf->null_user_handler = channel_handler;
		intf->curr_channel = 0;
		rv = send_channel_info_cmd(intf, 0);
		if (rv) {
			dev_warn(intf->si_dev,
				 "Error sending channel information for channel 0, %d\n",
				 rv);
			return -EIO;
		}

		/* Wait for the channel info to be read. */
		wait_event(intf->waitq, intf->channels_ready);
		intf->null_user_handler = NULL;
	} else {
		unsigned int set = intf->curr_working_cset;

		/* Assume a single IPMB channel at zero. */
		intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
		intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
		intf->channel_list = intf->wchannels + set;
		intf->channels_ready = true;
	}

	return 0;
}
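
/*
 * Editor's note: on IPMI 1.5 or later the scan above walks the
 * channels one at a time; channel_handler() records each Get Channel
 * Info response and requests the next channel until IPMI_MAX_CHANNELS
 * have been covered.  Pre-1.5 BMCs (or ones that reject the command)
 * are assumed to have a single IPMB channel at 0.
 */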

static void ipmi_poll(struct ipmi_smi *intf)
{
	if (intf->handlers->poll)
		intf->handlers->poll(intf->send_info);
	/* In case something came in */
	handle_new_recv_msgs(intf);
}

void ipmi_poll_interface(struct ipmi_user *user)
{
	ipmi_poll(user->intf);
}
EXPORT_SYMBOL(ipmi_poll_interface);

static void redo_bmc_reg(struct work_struct *work)
{
	struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
					     bmc_reg_work);

	if (!intf->in_shutdown)
		bmc_get_device_id(intf, NULL, NULL, NULL, NULL);

	kref_put(&intf->refcount, intf_free);
}

int ipmi_add_smi(struct module         *owner,
		 const struct ipmi_smi_handlers *handlers,
		 void		       *send_info,
		 struct device         *si_dev,
		 unsigned char         slave_addr)
{
	int              i, j;
	int              rv;
	struct ipmi_smi *intf, *tintf;
	struct list_head *link;
	struct ipmi_device_id id;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return -ENOMEM;

	rv = init_srcu_struct(&intf->users_srcu);
	if (rv) {
		kfree(intf);
		return rv;
	}

	intf->owner = owner;
	intf->bmc = &intf->tmp_bmc;
	INIT_LIST_HEAD(&intf->bmc->intfs);
	mutex_init(&intf->bmc->dyn_mutex);
	INIT_LIST_HEAD(&intf->bmc_link);
	mutex_init(&intf->bmc_reg_mutex);
	intf->intf_num = -1; /* Mark it invalid for now. */
	kref_init(&intf->refcount);
	INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
	intf->si_dev = si_dev;
	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
		intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
		intf->addrinfo[j].lun = 2;
	}
	if (slave_addr != 0)
		intf->addrinfo[0].address = slave_addr;
	INIT_LIST_HEAD(&intf->users);
	intf->handlers = handlers;
	intf->send_info = send_info;
	spin_lock_init(&intf->seq_lock);
	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
		intf->seq_table[j].inuse = 0;
		intf->seq_table[j].seqid = 0;
	}
	intf->curr_seq = 0;
	spin_lock_init(&intf->waiting_rcv_msgs_lock);
	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
	tasklet_init(&intf->recv_tasklet,
		     smi_recv_tasklet,
		     (unsigned long) intf);
	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
	spin_lock_init(&intf->xmit_msgs_lock);
	INIT_LIST_HEAD(&intf->xmit_msgs);
	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
	spin_lock_init(&intf->events_lock);
	spin_lock_init(&intf->watch_lock);
	atomic_set(&intf->event_waiters, 0);
	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
	INIT_LIST_HEAD(&intf->waiting_events);
	intf->waiting_events_count = 0;
	mutex_init(&intf->cmd_rcvrs_mutex);
	spin_lock_init(&intf->maintenance_mode_lock);
	INIT_LIST_HEAD(&intf->cmd_rcvrs);
	init_waitqueue_head(&intf->waitq);
	for (i = 0; i < IPMI_NUM_STATS; i++)
		atomic_set(&intf->stats[i], 0);

	mutex_lock(&ipmi_interfaces_mutex);
	/* Look for a hole in the numbers. */
	i = 0;
	link = &ipmi_interfaces;
	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
		if (tintf->intf_num != i) {
			link = &tintf->link;
			break;
		}
		i++;
	}
	/* Add the new interface in numeric order. */
	if (i == 0)
		list_add_rcu(&intf->link, &ipmi_interfaces);
	else
		list_add_tail_rcu(&intf->link, link);

	rv = handlers->start_processing(send_info, intf);
	if (rv)
		goto out_err;

	rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
	if (rv) {
		dev_err(si_dev, "Unable to get the device id: %d\n", rv);
		goto out_err_started;
	}

	mutex_lock(&intf->bmc_reg_mutex);
	rv = __scan_channels(intf, &id);
	mutex_unlock(&intf->bmc_reg_mutex);
	if (rv)
		goto out_err_bmc_reg;

	/*
	 * Keep memory order straight for RCU readers.  Make
	 * sure everything else is committed to memory before
	 * setting intf_num to mark the interface valid.
	 */
	smp_wmb();
	intf->intf_num = i;
	mutex_unlock(&ipmi_interfaces_mutex);

	/* After this point the interface is legal to use. */
	call_smi_watchers(i, intf->si_dev);

	return 0;

 out_err_bmc_reg:
	ipmi_bmc_unregister(intf);
 out_err_started:
	if (intf->handlers->shutdown)
		intf->handlers->shutdown(intf->send_info);
 out_err:
	list_del_rcu(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	synchronize_srcu(&ipmi_interfaces_srcu);
	cleanup_srcu_struct(&intf->users_srcu);
	kref_put(&intf->refcount, intf_free);

	return rv;
}
EXPORT_SYMBOL(ipmi_add_smi);

static void deliver_smi_err_response(struct ipmi_smi *intf,
				     struct ipmi_smi_msg *msg,
				     unsigned char err)
{
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = err;
	msg->rsp_size = 3;
	/* It's an error, so it will never requeue, no need to check return. */
	handle_one_recv_msg(intf, msg);
}

static void cleanup_smi_msgs(struct ipmi_smi *intf)
{
	int              i;
	struct seq_table *ent;
	struct ipmi_smi_msg *msg;
	struct list_head *entry;
	struct list_head tmplist;

	/* Clear out our transmit queues and hold the messages. */
	INIT_LIST_HEAD(&tmplist);
	list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
	list_splice_tail(&intf->xmit_msgs, &tmplist);

	/* Current message first, to preserve order */
	while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
		/* Wait for the message to clear out. */
		schedule_timeout(1);
	}

	/* No need for locks, the interface is down. */

	/*
	 * Return errors for all pending messages in queue and in the
	 * tables waiting for remote responses.
	 */
	while (!list_empty(&tmplist)) {
		entry = tmplist.next;
		list_del(entry);
		msg = list_entry(entry, struct ipmi_smi_msg, link);
		deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
	}

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		ent = &intf->seq_table[i];
		if (!ent->inuse)
			continue;
		deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
	}
}

void ipmi_unregister_smi(struct ipmi_smi *intf)
{
	struct ipmi_smi_watcher *w;
	int intf_num = intf->intf_num, index;

	mutex_lock(&ipmi_interfaces_mutex);
	intf->intf_num = -1;
	intf->in_shutdown = true;
	list_del_rcu(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	synchronize_srcu(&ipmi_interfaces_srcu);

	/* At this point no users can be added to the interface. */

	/*
	 * Call all the watcher interfaces to tell them that
	 * an interface is going away.
	 */
	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link)
		w->smi_gone(intf_num);
	mutex_unlock(&smi_watchers_mutex);

	index = srcu_read_lock(&intf->users_srcu);
	while (!list_empty(&intf->users)) {
		struct ipmi_user *user =
			container_of(list_next_rcu(&intf->users),
				     struct ipmi_user, link);

		_ipmi_destroy_user(user);
	}
	srcu_read_unlock(&intf->users_srcu, index);

	if (intf->handlers->shutdown)
		intf->handlers->shutdown(intf->send_info);

	cleanup_smi_msgs(intf);

	ipmi_bmc_unregister(intf);

	cleanup_srcu_struct(&intf->users_srcu);
	kref_put(&intf->refcount, intf_free);
}
EXPORT_SYMBOL(ipmi_unregister_smi);
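
/*
 * Illustrative sketch (not part of this driver): a lower-layer SMI
 * driver's remove path is expected to unregister the interface before
 * tearing down its own state, since ipmi_unregister_smi() calls back
 * into handlers->shutdown(send_info).  The example_smi type and
 * example_free_hw() helper below are hypothetical.
 *
 *	static void example_smi_remove(struct example_smi *smi)
 *	{
 *		ipmi_unregister_smi(smi->intf);
 *		example_free_hw(smi);
 *	}
 */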

static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
				   struct ipmi_smi_msg *msg)
{
	struct ipmi_ipmb_addr ipmb_addr;
	struct ipmi_recv_msg  *recv_msg;

	/*
	 * This is 11, not 10, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 11) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_ipmb_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.slave_addr = msg->rsp[6];
	ipmb_addr.channel = msg->rsp[3] & 0x0f;
	ipmb_addr.lun = msg->rsp[7] & 3;

	/*
	 * It's a response from a remote entity.  Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[7] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[8],
			  (msg->rsp[4] >> 2) & (~1),
			  (struct ipmi_addr *) &ipmb_addr,
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_ipmb_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[4] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 10;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (deliver_response(intf, recv_msg))
		ipmi_inc_stat(intf, unhandled_ipmb_responses);
	else
		ipmi_inc_stat(intf, handled_ipmb_responses);

	return 0;
}

static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
				   struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr          *rcvr;
	int                      rv = 0;
	unsigned char            netfn;
	unsigned char            cmd;
	unsigned char            chan;
	struct ipmi_user         *user = NULL;
	struct ipmi_ipmb_addr    *ipmb_addr;
	struct ipmi_recv_msg     *recv_msg;

	if (msg->rsp_size < 10) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[4] >> 2;
	cmd = msg->rsp[8];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		ipmi_inc_stat(intf, unhandled_commands);

		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = msg->rsp[3];
		msg->data[3] = msg->rsp[6];
		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
		msg->data[5] = ipmb_checksum(&msg->data[3], 2);
		msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
		/* rqseq/lun */
		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
		msg->data[8] = msg->rsp[8]; /* cmd */
		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[10] = ipmb_checksum(&msg->data[6], 4);
		msg->data_size = 11;

		pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data);

		rcu_read_lock();
		if (!intf->in_shutdown) {
			smi_send(intf, intf->handlers, msg, 0);
			/*
			 * We used the message, so return the value
			 * that causes it to not be freed or
			 * queued.
			 */
			rv = -1;
		}
		rcu_read_unlock();
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb_addr->slave_addr = msg->rsp[6];
			ipmb_addr->lun = msg->rsp[7] & 3;
			ipmb_addr->channel = msg->rsp[3] & 0xf;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[7] >> 2;
			recv_msg->msg.netfn = msg->rsp[4] >> 2;
			recv_msg->msg.cmd = msg->rsp[8];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 10, not 9 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 10;
			memcpy(recv_msg->msg_data, &msg->rsp[9],
			       msg->rsp_size - 10);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}

static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct ipmi_lan_addr  lan_addr;
	struct ipmi_recv_msg  *recv_msg;


	/*
	 * This is 13, not 12, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 13) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_lan_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
	lan_addr.session_handle = msg->rsp[4];
	lan_addr.remote_SWID = msg->rsp[8];
	lan_addr.local_SWID = msg->rsp[5];
	lan_addr.channel = msg->rsp[3] & 0x0f;
	lan_addr.privilege = msg->rsp[3] >> 4;
	lan_addr.lun = msg->rsp[9] & 3;

	/*
	 * It's a response from a remote entity.  Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[9] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[10],
			  (msg->rsp[6] >> 2) & (~1),
			  (struct ipmi_addr *) &lan_addr,
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_lan_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[6] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 12;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (deliver_response(intf, recv_msg))
		ipmi_inc_stat(intf, unhandled_lan_responses);
	else
		ipmi_inc_stat(intf, handled_lan_responses);

	return 0;
}

static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr          *rcvr;
	int                      rv = 0;
	unsigned char            netfn;
	unsigned char            cmd;
	unsigned char            chan;
	struct ipmi_user         *user = NULL;
	struct ipmi_lan_addr     *lan_addr;
	struct ipmi_recv_msg     *recv_msg;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */
		rv = 0;
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 12, not 11 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data, &msg->rsp[11],
			       msg->rsp_size - 12);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}

/*
 * This routine will handle "Get Message" command responses with
 * channels that use an OEM Medium. The message format belongs to
 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
 * Chapter 22, sections 22.6 and 22.24 for more details.
 */
static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr       *rcvr;
	int                   rv = 0;
	unsigned char         netfn;
	unsigned char         cmd;
	unsigned char         chan;
3959
	struct ipmi_user *user = NULL;
	struct ipmi_system_interface_addr *smi_addr;
	struct ipmi_recv_msg  *recv_msg;

	/*
	 * We expect the OEM SW to perform error checking,
	 * so we just do some basic sanity checks here.
	 */
	if (msg->rsp_size < 4) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/*
	 * This is an OEM message, so the OEM needs to know how to
	 * handle the message.  We do no interpretation.
	 */
	netfn = msg->rsp[0] >> 2;
	cmd = msg->rsp[1];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */

		rv = 0;
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/*
			 * OEM Messages are expected to be delivered via
			 * the system interface to SMS software.  We might
			 * need to visit this again depending on OEM
			 * requirements
			 */
			smi_addr = ((struct ipmi_system_interface_addr *)
				    &recv_msg->addr);
			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			smi_addr->channel = IPMI_BMC_CHANNEL;
			smi_addr->lun = msg->rsp[0] & 3;

			recv_msg->user = user;
			recv_msg->user_msg_data = NULL;
			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
			recv_msg->msg.netfn = msg->rsp[0] >> 2;
			recv_msg->msg.cmd = msg->rsp[1];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * The message starts at byte 4, which follows
			 * the Channel Byte in the "GET MESSAGE" command.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 4;
			memcpy(recv_msg->msg_data, &msg->rsp[4],
			       msg->rsp_size - 4);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}

static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
				     struct ipmi_smi_msg  *msg)
{
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg->msgid = 0;
	smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 3;
}

static int handle_read_event_rsp(struct ipmi_smi *intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head     msgs;
	struct ipmi_user     *user;
	int rv = 0, deliver_count = 0, index;
	unsigned long        flags;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		ipmi_inc_stat(intf, invalid_events);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);

	ipmi_inc_stat(intf, events);

	/*
	 * Allocate and fill in one message for every user that is
	 * getting events.
	 */
	index = srcu_read_lock(&intf->users_srcu);
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			srcu_read_unlock(&intf->users_srcu, index);
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			}
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		kref_get(&user->refcount);
		list_add_tail(&recv_msg->link, &msgs);
	}
	srcu_read_unlock(&intf->users_srcu, index);

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_local_response(intf, recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/*
		 * No one to receive the message, put it in queue if there's
		 * not already too many things in the queue.
		 */
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&recv_msg->link, &intf->waiting_events);
		intf->waiting_events_count++;
	} else if (!intf->event_msg_printed) {
		/*
		 * There are too many things in the queue, discard this
		 * message.
		 */
		dev_warn(intf->si_dev,
			 "Event queue full, discarding incoming events\n");
		intf->event_msg_printed = 1;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);

	return rv;
}

static int handle_bmc_rsp(struct ipmi_smi *intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL) {
		dev_warn(intf->si_dev,
			 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
		return 0;
	}

	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	recv_msg->msgid = msg->msgid;
	smi_addr = ((struct ipmi_system_interface_addr *)
		    &recv_msg->addr);
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 2;
	deliver_local_response(intf, recv_msg);

	return 0;
}

/*
 * Handle a received message.  Return 1 if the message should be requeued,
 * 0 if the message should be freed, or -1 if the message should not
 * be freed or requeued.
 */
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue;
	int chan;

	pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);

	if ((msg->data_size >= 2)
	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data == NULL)) {

		if (intf->in_shutdown)
			goto free_msg;

		/*
		 * This is the local response to a command send, start
		 * the timer for these.  The user_data will not be
		 * NULL if this is a response send, and we will let
		 * response sends just go through.
		 */

		/*
		 * Check for errors, if we get certain errors (ones
		 * that mean basically we can try again later), we
		 * ignore them and start the timer.  Otherwise we
		 * report the error immediately.
		 */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
			int ch = msg->rsp[3] & 0xf;
			struct ipmi_channel *chans;

			/* Got an error sending the message, handle it. */

			chans = READ_ONCE(intf->channel_list)->c;
			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
				ipmi_inc_stat(intf, sent_lan_command_errs);
			else
				ipmi_inc_stat(intf, sent_ipmb_command_errs);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);
free_msg:
		requeue = 0;
		goto out;

	} else if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		dev_warn(intf->si_dev,
			 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
			 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
		   || (msg->rsp[1] != msg->data[1])) {
		/*
		 * The NetFN and Command in the response are not even
		 * marginally correct.
		 */
		dev_warn(intf->si_dev,
			 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
			 (msg->data[0] >> 2) | 1, msg->data[1],
			 msg->rsp[0] >> 2, msg->rsp[1]);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	}

	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data != NULL)) {
		/*
		 * It's a response to a response we sent.  For this we
		 * deliver a send message response to the user.
		 */
		struct ipmi_recv_msg *recv_msg = msg->user_data;

		requeue = 0;
		if (msg->rsp_size < 2)
			/* Message is too small to be correct. */
			goto out;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;

		if (!recv_msg)
			goto out;

		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = 1;
		recv_msg->msg_data[0] = msg->rsp[2];
		deliver_local_response(intf, recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
		struct ipmi_channel   *chans;

		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;
		}

		/*
		 * We need to make sure the channels have been initialized.
		 * The channel_handler routine will set the "curr_channel"
		 * equal to or greater than IPMI_MAX_CHANNELS when all the
		 * channels for this interface have been initialized.
		 */
		if (!intf->channels_ready) {
			requeue = 0; /* Throw the message away */
			goto out;
		}

		chans = READ_ONCE(intf->channel_list)->c;

		switch (chans[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
			}
			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			}
			break;

		default:
			/*
			 * Check for OEM channels.  Clients had better
			 * register for these commands.
			 */
			if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
			    && (chans[chan].medium
				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
				requeue = handle_oem_get_msg_cmd(intf, msg);
			} else {
				/*
				 * We don't handle the channel type, so just
				 * free the message.
				 */
				requeue = 0;
			}
		}

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
		/* It's an asynchronous event. */
		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

 out:
	return requeue;
}

/*
 * If there are messages in the queue or pretimeouts, handle them.
 */
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
	struct ipmi_smi_msg  *smi_msg;
	unsigned long        flags = 0;
	int                  rv;
	int                  run_to_completion = intf->run_to_completion;

	/* See if any waiting messages need to be processed. */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	while (!list_empty(&intf->waiting_rcv_msgs)) {
		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
				     struct ipmi_smi_msg, link);
		list_del(&smi_msg->link);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
					       flags);
		rv = handle_one_recv_msg(intf, smi_msg);
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
		if (rv > 0) {
			/*
			 * To preserve message order, quit if we
			 * can't handle a message.  Add the message
			 * back at the head, this is safe because this
			 * tasklet is the only thing that pulls the
			 * messages.
			 */
			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
			break;
		} else {
			if (rv == 0)
				/* Message handled */
				ipmi_free_smi_msg(smi_msg);
			/* If rv < 0, fatal error, del but don't free. */
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);

	/*
	 * If the pretimeout count is non-zero, decrement one from it and
	 * deliver pretimeouts to all the users.
	 */
	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
		struct ipmi_user *user;
		int index;

		index = srcu_read_lock(&intf->users_srcu);
		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_watchdog_pretimeout)
				user->handler->ipmi_watchdog_pretimeout(
					user->handler_data);
		}
		srcu_read_unlock(&intf->users_srcu, index);
	}
}

static void smi_recv_tasklet(unsigned long val)
{
	unsigned long flags = 0; /* keep us warning-free. */
	struct ipmi_smi *intf = (struct ipmi_smi *) val;
	int run_to_completion = intf->run_to_completion;
	struct ipmi_smi_msg *newmsg = NULL;

	/*
	 * Start the next message if available.
	 *
	 * Do this here, not in the actual receiver, because we may deadlock
	 * because the lower layer is allowed to hold locks while calling
	 * message delivery.
	 */

	rcu_read_lock();

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (intf->curr_msg == NULL && !intf->in_shutdown) {
		struct list_head *entry = NULL;

		/* Pick the high priority queue first. */
		if (!list_empty(&intf->hp_xmit_msgs))
			entry = intf->hp_xmit_msgs.next;
		else if (!list_empty(&intf->xmit_msgs))
			entry = intf->xmit_msgs.next;

		if (entry) {
			list_del(entry);
			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
			intf->curr_msg = newmsg;
		}
	}

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
	if (newmsg)
		intf->handlers->sender(intf->send_info, newmsg);

	rcu_read_unlock();

	handle_new_recv_msgs(intf);
}

/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(struct ipmi_smi *intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free. */
	int run_to_completion = intf->run_to_completion;

	/*
	 * To preserve message order, we keep a queue and deliver from
	 * a tasklet.
	 */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
				       flags);

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	/*
	 * We can get an asynchronous event or receive message in addition
	 * to commands we send.
	 */
	if (msg == intf->curr_msg)
		intf->curr_msg = NULL;
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (run_to_completion)
		smi_recv_tasklet((unsigned long) intf);
	else
		tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
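
/*
 * Illustrative sketch (not part of this driver): a lower-layer SMI
 * driver normally completes a transaction by filling in the response
 * bytes of the ipmi_smi_msg it was handed via its sender() handler and
 * returning it with ipmi_smi_msg_received().  The example_smi type and
 * example_read_response() helper are hypothetical.
 *
 *	static void example_smi_transaction_done(struct example_smi *smi,
 *						 struct ipmi_smi_msg *msg)
 *	{
 *		msg->rsp_size = example_read_response(smi, msg->rsp,
 *						      IPMI_MAX_MSG_LENGTH);
 *		ipmi_smi_msg_received(smi->intf, msg);
 *	}
 */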

void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
{
	if (intf->in_shutdown)
		return;

	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
	tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);

static struct ipmi_smi_msg *
smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
		  unsigned char seq, long seqid)
{
	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
	if (!smi_msg)
		/*
		 * If we can't allocate the message, then just return, we
		 * get 4 retries, so this should be ok.
		 */
		return NULL;

	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
	smi_msg->data_size = recv_msg->msg.data_len;
	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);

	pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);

	return smi_msg;
}

static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
			      struct list_head *timeouts,
			      unsigned long timeout_period,
			      int slot, unsigned long *flags,
			      bool *need_timer)
{
	struct ipmi_recv_msg *msg;

	if (intf->in_shutdown)
		return;

	if (!ent->inuse)
		return;

	if (timeout_period < ent->timeout) {
		ent->timeout -= timeout_period;
		*need_timer = true;
		return;
	}

	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		if (ent->broadcast)
			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
		else if (is_lan_addr(&ent->recv_msg->addr))
			ipmi_inc_stat(intf, timed_out_lan_commands);
		else
			ipmi_inc_stat(intf, timed_out_ipmb_commands);
	} else {
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */

		*need_timer = true;

		/*
		 * Start with the max timer, set to normal timer after
		 * the message is sent.
		 */
		ent->timeout = MAX_MSG_TIMEOUT;
		ent->retries_left--;
		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
					    ent->seqid);
		if (!smi_msg) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      dropped_rexmit_lan_commands);
			else
				ipmi_inc_stat(intf,
					      dropped_rexmit_ipmb_commands);
			return;
		}

		spin_unlock_irqrestore(&intf->seq_lock, *flags);

		/*
		 * Send the new message.  We send with a zero
		 * priority.  It timed out, I doubt time is that
		 * critical now, and high priority messages are really
		 * only for messages to the local MC, which don't get
		 * resent.
		 */
		if (intf->handlers) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      retransmitted_lan_commands);
			else
				ipmi_inc_stat(intf,
					      retransmitted_ipmb_commands);

			smi_send(intf, intf->handlers, smi_msg, 0);
		} else
			ipmi_free_smi_msg(smi_msg);

		spin_lock_irqsave(&intf->seq_lock, *flags);
	}
}

static bool ipmi_timeout_handler(struct ipmi_smi *intf,
				 unsigned long timeout_period)
{
	struct list_head     timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	unsigned long        flags;
	int                  i;
	bool                 need_timer = false;

	if (!intf->bmc_registered) {
		kref_get(&intf->refcount);
		if (!schedule_work(&intf->bmc_reg_work)) {
			kref_put(&intf->refcount, intf_free);
			need_timer = true;
		}
	}

	/*
	 * Go through the seq table and find any messages that
	 * have timed out, putting them in the timeouts
	 * list.
	 */
	INIT_LIST_HEAD(&timeouts);
	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->ipmb_maintenance_mode_timeout) {
		if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
			intf->ipmb_maintenance_mode_timeout = 0;
		else
			intf->ipmb_maintenance_mode_timeout -= timeout_period;
	}
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
		check_msg_timeout(intf, &intf->seq_table[i],
				  &timeouts, timeout_period, i,
				  &flags, &need_timer);
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	list_for_each_entry_safe(msg, msg2, &timeouts, link)
		deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);

	/*
	 * Maintenance mode handling.  Check the timeout
	 * optimistically before we claim the lock.  It may
	 * mean a timeout gets missed occasionally, but that
	 * only means the timeout gets extended by one period
	 * in that case.  No big deal, and it avoids the lock
	 * most of the time.
	 */
	if (intf->auto_maintenance_timeout > 0) {
		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		if (intf->auto_maintenance_timeout > 0) {
			intf->auto_maintenance_timeout
				-= timeout_period;
			if (!intf->maintenance_mode
			    && (intf->auto_maintenance_timeout <= 0)) {
				intf->maintenance_mode_enable = false;
				maintenance_mode_update(intf);
			}
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	tasklet_schedule(&intf->recv_tasklet);

	return need_timer;
}

static void ipmi_request_event(struct ipmi_smi *intf)
{
	/* No event requests when in maintenance mode. */
	if (intf->maintenance_mode_enable)
		return;

	if (!intf->in_shutdown)
		intf->handlers->request_events(intf->send_info);
}

static struct timer_list ipmi_timer;

static atomic_t stop_operation;

static void ipmi_timeout(struct timer_list *unused)
{
	struct ipmi_smi *intf;
	bool need_timer = false;
	int index;

	if (atomic_read(&stop_operation))
		return;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (atomic_read(&intf->event_waiters)) {
			intf->ticks_to_req_ev--;
			if (intf->ticks_to_req_ev == 0) {
				ipmi_request_event(intf);
				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
			}
			need_timer = true;
		}

		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	if (need_timer)
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static void need_waiter(struct ipmi_smi *intf)
{
	/* Racy, but worst case we start the timer twice. */
	if (!timer_pending(&ipmi_timer))
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;
	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->user_data = NULL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}
4794
EXPORT_SYMBOL(ipmi_alloc_smi_msg);
L
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	kfree(msg);
}

A
L
	struct ipmi_recv_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (rv) {
		rv->user = NULL;
		rv->done = free_recv_msg;
		atomic_inc(&recv_msg_inuse_count);
	}
	return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);

static atomic_t panic_done_count = ATOMIC_INIT(0);

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}

/*
 * Inside a panic, send a message and wait for a response.
 */
static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
					struct ipmi_addr *addr,
					struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg  smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->addrinfo[0].address,
			    intf->addrinfo[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);
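	/*
	 * Interrupts are generally unusable in panic context, so spin
	 * calling the driver's poll routine until both the request and
	 * its response have been processed (panic_done_count drops back
	 * to zero).
	 */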

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}

static void event_receiver_fetcher(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}

static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/*
		 * A get device id command, save if we are an event
		 * receiver or generator.
		 */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}

static void send_panic_events(struct ipmi_smi *intf, char *str)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	char *p = str;
	struct ipmi_ipmb_addr *ipmb;
	int j;

	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
		return;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in.  Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	/* Send the event announcing the panic. */
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	/*
	 * On every interface, dump a bunch of OEM events holding the
	 * string.
	 */
	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
		return;

	/*
	 * intf_num is used as a marker to tell if the
	 * interface is valid.  Thus we need a read barrier to
	 * make sure data fetched before checking intf_num
	 * won't be used.
	 */
	smp_rmb();

	/*
	 * First job here is to figure out where to send the
	 * OEM events.  There's no way in IPMI to send OEM
	 * events using an event send command, so we have to
	 * find the SEL to put them in and stick them in
	 * there.
	 */

	/* Get capabilities from the get device id. */
	intf->local_sel_device = 0;
	intf->local_event_generator = 0;
	intf->event_receiver = 0;

	/* Request the device info from the local MC. */
	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	intf->null_user_handler = device_id_fetcher;
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	if (intf->local_event_generator) {
		/* Request the event receiver from the local MC. */
		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = event_receiver_fetcher;
		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
	intf->null_user_handler = NULL;

	/*
	 * Validate the event receiver.  The low bit must not
	 * be 1 (it must be a valid IPMB address), it cannot
	 * be zero, and it must not be my address.
	 */
	if (((intf->event_receiver & 1) == 0)
	    && (intf->event_receiver != 0)
	    && (intf->event_receiver != intf->addrinfo[0].address)) {
		/*
		 * The event receiver is valid, send an IPMB
		 * message.
		 */
		ipmb = (struct ipmi_ipmb_addr *) &addr;
		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
		ipmb->channel = 0; /* FIXME - is this right? */
		ipmb->lun = intf->event_receiver_lun;
		ipmb->slave_addr = intf->event_receiver;
	} else if (intf->local_sel_device) {
		/*
		 * The event receiver was not valid (or was
		 * me), but I am an SEL device, just dump it
		 * in my SEL.
		 */
		si = (struct ipmi_system_interface_addr *) &addr;
		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		si->channel = IPMI_BMC_CHANNEL;
		si->lun = 0;
	} else
		return; /* Nowhere to send the event. */

	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
	msg.data = data;
	msg.data_len = 16;

	j = 0;
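	/*
	 * Example of the chunking below: a 13-character string such as
	 * "machine check" becomes two OEM SEL records, the first carrying
	 * "machine che" with sequence number 0 and the second carrying
	 * "ck" (zero padded) with sequence number 1.
	 */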
	while (*p) {
		int size = strlen(p);

		if (size > 11)
			size = 11;
		data[0] = 0;
		data[1] = 0;
		data[2] = 0xf0; /* OEM event without timestamp. */
		data[3] = intf->addrinfo[0].address;
		data[4] = j++; /* sequence # */
		/*
		 * Always give 11 bytes, so strncpy will fill
		 * it with zeroes for me.
		 */
		strncpy(data+5, p, 11);
		p += size;

		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
}

static int has_panicked;

static int panic_event(struct notifier_block *this,
		       unsigned long         event,
		       void                  *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted.  In this case, drop items on the list for
		 * the safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);

		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_panic_handler)
				user->handler->ipmi_panic_handler(
					user->handler_data);
		}

		send_panic_events(intf, ptr);
	}

	return NOTIFY_DONE;
}

/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
	int rv;

	if (drvregistered)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv)
		pr_err("Could not register IPMI driver\n");
	else
		drvregistered = true;
	return rv;
}

static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
};

static int ipmi_init_msghandler(void)
{
	int rv;

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;

	init_srcu_struct(&ipmi_interfaces_srcu);

	timer_setup(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = true;

out:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}

static int __init ipmi_init_msghandler_mod(void)
{
	int rv;

	pr_info("version " IPMI_DRIVER_VERSION "\n");

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}

static void __exit cleanup_ipmi(void)
{
	int count;

	if (initialized) {
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);

		/*
		 * This can't be called if any interfaces exist, so no worry
		 * about shutting down the interfaces.
		 */

		/*
		 * Tell the timer to stop, then wait for it to stop.  This
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
		atomic_set(&stop_operation, 1);
		del_timer_sync(&ipmi_timer);

		initialized = false;

		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
			pr_warn("recv message count %d at exit\n", count);

		cleanup_srcu_struct(&ipmi_interfaces_srcu);
	}
	if (drvregistered)
		driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
		   " interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");