/*
 * ipmi_si.c
 *
 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
 * BT).
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * This file holds the "policy" for the interface to the SMI state
 * machine.  It does the configuration, handles timers and interrupts,
 * and drives the real SMI state machine.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <asm/io.h>
#include "ipmi_si.h"
#include <linux/string.h>
#include <linux/ctype.h>

#ifdef CONFIG_PARISC
#include <asm/hardware.h>	/* for register_parisc_driver() stuff */
#include <asm/parisc-device.h>
#endif

#define PFX "ipmi_si: "

/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC	10000
#define SI_USEC_PER_JIFFY	(1000000/HZ)
#define SI_TIMEOUT_JIFFIES	(SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
				      short timeout */

enum si_intf_state {
	SI_NORMAL,
	SI_GETTING_FLAGS,
	SI_GETTING_EVENTS,
	SI_CLEARING_FLAGS,
	SI_GETTING_MESSAGES,
	SI_CHECKING_ENABLES,
	SI_SETTING_ENABLES
	/* FIXME - add watchdog stuff. */
};

/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG		2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT	2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT	1

static const char * const si_to_str[] = { "kcs", "smic", "bt" };

static int initialized;

/*
 * Indexes into stats[] in smi_info below.
 */
enum si_stat_indexes {
	/*
	 * Number of times the driver requested a timer while an operation
	 * was in progress.
	 */
	SI_STAT_short_timeouts = 0,

	/*
	 * Number of times the driver requested a timer while nothing was in
	 * progress.
	 */
	SI_STAT_long_timeouts,

	/* Number of times the interface was idle while being polled. */
	SI_STAT_idles,

	/* Number of interrupts the driver handled. */
	SI_STAT_interrupts,

	/* Number of times the driver got an ATTN from the hardware. */
	SI_STAT_attentions,

	/* Number of times the driver requested flags from the hardware. */
	SI_STAT_flag_fetches,

	/* Number of times the hardware didn't follow the state machine. */
	SI_STAT_hosed_count,

	/* Number of completed messages. */
	SI_STAT_complete_transactions,

	/* Number of IPMI events received from the hardware. */
	SI_STAT_events,

	/* Number of watchdog pretimeouts. */
	SI_STAT_watchdog_pretimeouts,

	/* Number of asynchronous messages received. */
	SI_STAT_incoming_messages,


	/* This *must* remain last, add new values above this. */
	SI_NUM_STATS
};

struct smi_info {
	int                    intf_num;
	ipmi_smi_t             intf;
	struct si_sm_data      *si_sm;
	const struct si_sm_handlers *handlers;
	spinlock_t             si_lock;
	struct ipmi_smi_msg    *waiting_msg;
	struct ipmi_smi_msg    *curr_msg;
	enum si_intf_state     si_state;

	/*
	 * Used to handle the various types of I/O that can occur with
	 * IPMI
	 */
	struct si_sm_io io;

	/*
	 * Per-OEM handler, called from handle_flags().  Returns 1
	 * when handle_flags() needs to be re-run or 0 indicating it
	 * set si_state itself.
	 */
	int (*oem_data_avail_handler)(struct smi_info *smi_info);

	/*
	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
	 * is set to hold the flags until we are done handling everything
	 * from the flags.
	 */
#define RECEIVE_MSG_AVAIL	0x01
#define EVENT_MSG_BUFFER_FULL	0x02
#define WDT_PRE_TIMEOUT_INT	0x08
#define OEM0_DATA_AVAIL     0x20
#define OEM1_DATA_AVAIL     0x40
#define OEM2_DATA_AVAIL     0x80
#define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
			     OEM1_DATA_AVAIL | \
			     OEM2_DATA_AVAIL)
	unsigned char       msg_flags;

	/* Does the BMC have an event buffer? */
	bool		    has_event_buffer;

	/*
	 * If set to true, this will request events the next time the
	 * state machine is idle.
	 */
	atomic_t            req_events;

	/*
	 * If true, run the state machine to completion on every send
	 * call.  Generally used after a panic to make sure stuff goes
	 * out.
	 */
	bool                run_to_completion;

	/* The I/O port of an SI interface. */
	int                 port;

	/*
	 * The space between start addresses of the two ports.  For
	 * instance, if the first port is 0xca2 and the spacing is 4, then
	 * the second port is 0xca6.
	 */
	unsigned int        spacing;

	/* The timer for this si. */
	struct timer_list   si_timer;

	/* True if the timer is running (timer_pending() isn't enough). */
	bool		    timer_running;

	/* The time (in jiffies) the last timeout occurred at. */
	unsigned long       last_timeout_jiffies;

	/* Are we waiting for the events, pretimeouts, received msgs? */
	atomic_t            need_watch;

	/*
	 * The driver will disable interrupts when it gets into a
	 * situation where it cannot handle messages due to lack of
	 * memory.  Once that situation clears up, it will re-enable
	 * interrupts.
	 */
	bool interrupt_disabled;

	/*
	 * Does the BMC support events?
	 */
	bool supports_event_msg_buff;

	/*
	 * Can we disable interrupts via the global enables receive irq
	 * bit?  There are currently two forms of brokenness, some
	 * systems cannot disable the bit (which is technically within
	 * the spec but a bad idea) and some systems have the bit
	 * forced to zero even though interrupts work (which is
	 * clearly outside the spec).  The next bool tells which form
	 * of brokenness is present.
	 */
	bool cannot_disable_irq;

	/*
	 * Some systems are broken and cannot set the irq enable
	 * bit, even if they support interrupts.
	 */
	bool irq_enable_broken;

	/*
	 * Did we get an attention that we did not handle?
	 */
	bool got_attn;

	/* From the get device id response... */
	struct ipmi_device_id device_id;

	/* Default driver model device. */
	struct platform_device *pdev;

	/* Counters and things for the proc filesystem. */
	atomic_t stats[SI_NUM_STATS];

	struct task_struct *thread;

	struct list_head link;
};

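/*
 * These macros index stats[] by pasting the short stat name onto the
 * SI_STAT_ prefix from enum si_stat_indexes above.
 */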
#define smi_inc_stat(smi, stat) \
	atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
#define smi_get_stat(smi, stat) \
	((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))

#define IPMI_MAX_INTFS 4
static int force_kipmid[IPMI_MAX_INTFS];
static int num_force_kipmid;
#ifdef CONFIG_PCI
static bool pci_registered;
#endif
#ifdef CONFIG_PARISC
static bool parisc_registered;
#endif

static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
static int num_max_busy_us;

static bool unload_when_empty = true;

static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *to_clean);
static void cleanup_ipmi_si(void);

#ifdef DEBUG_TIMING
void debug_timestamp(char *msg)
{
	struct timespec64 t;

	getnstimeofday64(&t);
	pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
}
#else
#define debug_timestamp(x)
#endif

static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}

static void deliver_recv_msg(struct smi_info *smi_info,
			     struct ipmi_smi_msg *msg)
{
	/* Deliver the message to the upper layer. */
	if (smi_info->intf)
		ipmi_smi_msg_received(smi_info->intf, msg);
	else
		ipmi_free_smi_msg(msg);
}

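/*
 * Turn the current request into an error response with the given
 * completion code and hand it up to the message handler.
 */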
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
		cCode = IPMI_ERR_UNSPECIFIED;
	/* else use it as is */

	/* Make it a response */
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = cCode;
	msg->rsp_size = 3;

	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}

static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
	int              rv;

	if (!smi_info->waiting_msg) {
		smi_info->curr_msg = NULL;
		rv = SI_SM_IDLE;
	} else {
		int err;

		smi_info->curr_msg = smi_info->waiting_msg;
		smi_info->waiting_msg = NULL;
		debug_timestamp("Start2");
		err = atomic_notifier_call_chain(&xaction_notifier_list,
				0, smi_info);
		if (err & NOTIFY_STOP_MASK) {
			rv = SI_SM_CALL_WITHOUT_DELAY;
			goto out;
		}
		err = smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
		if (err)
			return_hosed_msg(smi_info, err);

		rv = SI_SM_CALL_WITHOUT_DELAY;
	}
out:
	return rv;
}

static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
{
	smi_info->last_timeout_jiffies = jiffies;
	mod_timer(&smi_info->si_timer, new_val);
	smi_info->timer_running = true;
}

/*
 * Start a new message and (re)start the timer and thread.
 */
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
			  unsigned int size)
{
	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

	if (smi_info->thread)
		wake_up_process(smi_info->thread);

	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
}

static void start_check_enables(struct smi_info *smi_info, bool start_timer)
{
	unsigned char msg[2];

	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

	if (start_timer)
		start_new_msg(smi_info, msg, 2);
	else
		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
	smi_info->si_state = SI_CHECKING_ENABLES;
}

static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
{
	unsigned char msg[3];

	/* Make sure the watchdog pre-timeout flag is not set at startup. */
	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
	msg[2] = WDT_PRE_TIMEOUT_INT;

	if (start_timer)
		start_new_msg(smi_info, msg, 3);
	else
		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
	smi_info->si_state = SI_CLEARING_FLAGS;
}

static void start_getting_msg_queue(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_MESSAGES;
}

static void start_getting_events(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_EVENTS;
}

/*
 * When we have a situation where we run out of memory and cannot
 * allocate messages, we just leave them in the BMC and run the system
 * polled until we can allocate some memory.  Once we have some
 * memory, we will re-enable the interrupt.
 *
 * Note that we cannot just use disable_irq(), since the interrupt may
 * be shared.
 */
static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
{
	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = true;
		start_check_enables(smi_info, start_timer);
		return true;
	}
	return false;
}

static inline bool enable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = false;
		start_check_enables(smi_info, true);
		return true;
	}
	return false;
}

/*
 * Allocate a message.  If unable to allocate, start the interrupt
 * disable process and return NULL.  If able to allocate but
 * interrupts are disabled, free the message and return NULL after
 * starting the interrupt enable process.
 */
static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	msg = ipmi_alloc_smi_msg();
	if (!msg) {
		if (!disable_si_irq(smi_info, true))
			smi_info->si_state = SI_NORMAL;
	} else if (enable_si_irq(smi_info)) {
		ipmi_free_smi_msg(msg);
		msg = NULL;
	}
	return msg;
}

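/*
 * Process the flags from the last GET_MSG_FLAGS (or a held ATTN):
 * watchdog pre-timeouts, queued messages, pending events, and
 * OEM-specific data, in that order of priority.
 */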
static void handle_flags(struct smi_info *smi_info)
{
retry:
	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		smi_inc_stat(smi_info, watchdog_pretimeouts);

		start_clear_flags(smi_info, true);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		if (smi_info->intf)
			ipmi_smi_watchdog_pretimeout(smi_info->intf);
	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_msg_queue(smi_info);
	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_events(smi_info);
	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
		   smi_info->oem_data_avail_handler) {
		if (smi_info->oem_data_avail_handler(smi_info))
			goto retry;
	} else
		smi_info->si_state = SI_NORMAL;
}

/*
 * Global enables we care about.
 */
#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
			     IPMI_BMC_EVT_MSG_INTR)

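/*
 * Compute the global enable bits we want set in the BMC: the event
 * message buffer if it is supported, and the receive/event interrupts
 * if an irq is usable on this interface.  *irq_on reports whether any
 * interrupt enable will be set.
 */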
static u8 current_global_enables(struct smi_info *smi_info, u8 base,
				 bool *irq_on)
{
	u8 enables = 0;

	if (smi_info->supports_event_msg_buff)
		enables |= IPMI_BMC_EVT_MSG_BUFF;

	if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
	     smi_info->cannot_disable_irq) &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_RCV_MSG_INTR;

	if (smi_info->supports_event_msg_buff &&
	    smi_info->io.irq && !smi_info->interrupt_disabled &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_EVT_MSG_INTR;

	*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);

	return enables;
}

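/*
 * The BT interface keeps its own interrupt enable bit in the INTMASK
 * register; make it match the desired interrupt state.
 */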
static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
{
	u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);

	irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;

	if ((bool)irqstate == irq_on)
		return;

	if (irq_on)
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
	else
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
}

static void handle_transaction_done(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	debug_timestamp("Done");
	switch (smi_info->si_state) {
	case SI_NORMAL:
		if (!smi_info->curr_msg)
			break;

		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		deliver_recv_msg(smi_info, msg);
		break;

	case SI_GETTING_FLAGS:
	{
		unsigned char msg[4];
		unsigned int  len;

		/* We got the flags from the SMI, now handle them. */
		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			/* Error fetching flags, just give up for now. */
			smi_info->si_state = SI_NORMAL;
		} else if (len < 4) {
			/*
			 * Hmm, no flags.  That's technically illegal, but
			 * don't use uninitialized data.
			 */
			smi_info->si_state = SI_NORMAL;
		} else {
			smi_info->msg_flags = msg[3];
			handle_flags(smi_info);
		}
		break;
	}

	case SI_CLEARING_FLAGS:
	{
		unsigned char msg[3];

		/* We cleared the flags. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
		if (msg[2] != 0) {
			/* Error clearing flags */
			dev_warn(smi_info->io.dev,
				 "Error clearing flags: %2.2x\n", msg[2]);
		}
		smi_info->si_state = SI_NORMAL;
		break;
	}

	case SI_GETTING_EVENTS:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the event flag. */
			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, events);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_GETTING_MESSAGES:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the msg flag. */
			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, incoming_messages);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_CHECKING_ENABLES:
	{
		unsigned char msg[4];
		u8 enables;
		bool irq_on;

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			dev_warn(smi_info->io.dev,
				 "Couldn't get irq info: %x.\n", msg[2]);
			dev_warn(smi_info->io.dev,
				 "Maybe ok, but ipmi might run very slowly.\n");
			smi_info->si_state = SI_NORMAL;
			break;
		}
		enables = current_global_enables(smi_info, 0, &irq_on);
		if (smi_info->io.si_type == SI_BT)
			/* BT has its own interrupt enable bit. */
			check_bt_irq(smi_info, irq_on);
		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
			/* Enables are not correct, fix them. */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_SETTING_ENABLES;
		} else if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}

	case SI_SETTING_ENABLES:
	{
		unsigned char msg[4];

		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0)
			dev_warn(smi_info->io.dev,
				 "Could not set the global enables: 0x%x.\n",
				 msg[2]);

		if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}
	}
}

/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time, interrupts should pass in zero.  Must be called with
 * si_lock held and interrupts disabled.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
{
	enum si_sm_result si_sm_result;

restart:
	/*
	 * There used to be a loop here that waited a little while
	 * (around 25us) before giving up.  That turned out to be
	 * pointless, the minimum delays I was seeing were in the 300us
	 * range, which is far too long to wait in an interrupt.  So
	 * we just run until the state machine tells us something
	 * happened or it needs a delay.
	 */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
	time = 0;
	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
		smi_inc_stat(smi_info, complete_transactions);

		handle_transaction_done(smi_info);
		goto restart;
	} else if (si_sm_result == SI_SM_HOSED) {
		smi_inc_stat(smi_info, hosed_count);

		/*
		 * Do this before return_hosed_msg(), because that
		 * releases the lock.
		 */
		smi_info->si_state = SI_NORMAL;
		if (smi_info->curr_msg != NULL) {
			/*
			 * If we were handling a user message, format
			 * a response to send to the upper layer to
			 * tell it about the error.
			 */
			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
		}
		goto restart;
	}

	/*
	 * We prefer handling attn over new messages.  But don't do
	 * this if there is not yet an upper layer to handle anything.
	 */
	if (likely(smi_info->intf) &&
	    (si_sm_result == SI_SM_ATTN || smi_info->got_attn)) {
		unsigned char msg[2];

		if (smi_info->si_state != SI_NORMAL) {
			/*
			 * We got an ATTN, but we are doing something else.
			 * Handle the ATTN later.
			 */
			smi_info->got_attn = true;
		} else {
			smi_info->got_attn = false;
			smi_inc_stat(smi_info, attentions);

			/*
			 * Got an attn, send down a get message flags to see
			 * what's causing it.  It would be better to handle
			 * this in the upper layer, but due to the way
			 * interrupts work with the SMI, that's not really
			 * possible.
			 */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_GET_MSG_FLAGS_CMD;

			start_new_msg(smi_info, msg, 2);
			smi_info->si_state = SI_GETTING_FLAGS;
			goto restart;
		}
	}

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		smi_inc_stat(smi_info, idles);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)
			goto restart;
	}

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events))) {
		/*
		 * We are idle and the upper layer requested that I fetch
		 * events, so do so.
		 */
		atomic_set(&smi_info->req_events, 0);

		/*
		 * Take this opportunity to check the interrupt and
		 * message enable state for the BMC.  The BMC can be
		 * asynchronously reset, and may thus get interrupts
		 * disabled and messages disabled.
		 */
		if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
			start_check_enables(smi_info, true);
		} else {
			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
			if (!smi_info->curr_msg)
				goto out;

			start_getting_events(smi_info);
		}
		goto restart;
	}

	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
		/* Ok if it fails, the timer will just go off. */
		if (del_timer(&smi_info->si_timer))
			smi_info->timer_running = false;
	}

out:
	return si_sm_result;
}

static void check_start_timer_thread(struct smi_info *smi_info)
{
	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		if (smi_info->thread)
			wake_up_process(smi_info->thread);

		start_next_msg(smi_info);
		smi_event_handler(smi_info, 0);
	}
}

static void flush_messages(void *send_info)
{
	struct smi_info *smi_info = send_info;
	enum si_sm_result result;

	/*
	 * Currently, this function is called only in run-to-completion
	 * mode.  This means we are single-threaded, no need for locks.
	 */
	result = smi_event_handler(smi_info, 0);
	while (result != SI_SM_IDLE) {
		udelay(SI_SHORT_TIMEOUT_USEC);
		result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
	}
}

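/*
 * Queue a single message from the upper layer.  Only one message may
 * be outstanding at a time; in run-to-completion mode the message is
 * just stored and the caller flushes it with flush_messages().
 */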
static void sender(void                *send_info,
		   struct ipmi_smi_msg *msg)
{
	struct smi_info   *smi_info = send_info;
	unsigned long     flags;

	debug_timestamp("Enqueue");

	if (smi_info->run_to_completion) {
		/*
		 * If we are running to completion, start it.  Upper
		 * layer will call flush_messages to clear it out.
		 */
		smi_info->waiting_msg = msg;
		return;
	}

	spin_lock_irqsave(&smi_info->si_lock, flags);
	/*
	 * The following two lines don't need to be under the lock for
	 * the lock's sake, but they do need SMP memory barriers to
	 * avoid getting things out of order.  We are already claiming
	 * the lock, anyway, so just do it under the lock to avoid the
	 * ordering problem.
	 */
	BUG_ON(smi_info->waiting_msg);
	smi_info->waiting_msg = msg;
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

static void set_run_to_completion(void *send_info, bool i_run_to_completion)
{
	struct smi_info   *smi_info = send_info;

	smi_info->run_to_completion = i_run_to_completion;
	if (i_run_to_completion)
		flush_messages(smi_info);
}

/*
 * Use -1 in the nsec value of the busy waiting timespec to tell that
 * we are spinning in kipmid looking for something and not delaying
 * between checks
 */
static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
{
	ts->tv_nsec = -1;
}
static inline int ipmi_si_is_busy(struct timespec64 *ts)
{
	return ts->tv_nsec != -1;
}

static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
					const struct smi_info *smi_info,
					struct timespec64 *busy_until)
{
	unsigned int max_busy_us = 0;

	if (smi_info->intf_num < num_max_busy_us)
		max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
		ipmi_si_set_not_busy(busy_until);
	else if (!ipmi_si_is_busy(busy_until)) {
		getnstimeofday64(busy_until);
		timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
	} else {
		struct timespec64 now;

		getnstimeofday64(&now);
		if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
			ipmi_si_set_not_busy(busy_until);
			return 0;
		}
	}
	return 1;
}


/*
 * A busy-waiting loop for speeding up IPMI operation.
 *
 * Lousy hardware makes this hard.  This is only enabled for systems
 * that are not BT and do not have interrupts.  It starts spinning
 * when an operation is complete or until max_busy tells it to stop
 * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
 * Documentation/IPMI.txt for details.
 */
static int ipmi_thread(void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;
	enum si_sm_result smi_result;
	struct timespec64 busy_until;

	ipmi_si_set_not_busy(&busy_until);
	set_user_nice(current, MAX_NICE);
	while (!kthread_should_stop()) {
		int busy_wait;

		spin_lock_irqsave(&(smi_info->si_lock), flags);
		smi_result = smi_event_handler(smi_info, 0);

		/*
		 * If the driver is doing something, there is a possible
		 * race with the timer.  If the timer handler sees idle,
		 * and the thread here sees something else, the timer
		 * handler won't restart the timer even though it is
		 * required.  So start it here if necessary.
		 */
		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
		if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
			; /* do nothing */
		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
			schedule();
		else if (smi_result == SI_SM_IDLE) {
			if (atomic_read(&smi_info->need_watch)) {
				schedule_timeout_interruptible(100);
			} else {
				/* Wait to be woken up when we are needed. */
				__set_current_state(TASK_INTERRUPTIBLE);
				schedule();
			}
		} else
			schedule_timeout_interruptible(1);
	}
	return 0;
}


static void poll(void *send_info)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags = 0;
	bool run_to_completion = smi_info->run_to_completion;

	/*
	 * Make sure there is some delay in the poll loop so we can
	 * drive time forward and timeout things.
	 */
	udelay(10);
	if (!run_to_completion)
		spin_lock_irqsave(&smi_info->si_lock, flags);
	smi_event_handler(smi_info, 10);
	if (!run_to_completion)
		spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

static void request_events(void *send_info)
{
	struct smi_info *smi_info = send_info;

	if (!smi_info->has_event_buffer)
		return;

	atomic_set(&smi_info->req_events, 1);
}

static void set_need_watch(void *send_info, bool enable)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;

	atomic_set(&smi_info->need_watch, enable);
	spin_lock_irqsave(&smi_info->si_lock, flags);
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

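/*
 * Timer callback that drives the state machine when polling: compute
 * the microseconds since the last timeout, run the state machine, and
 * re-arm the timer with a long or short delay as requested.
 */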
static void smi_timeout(unsigned long data)
{
	struct smi_info   *smi_info = (struct smi_info *) data;
	enum si_sm_result smi_result;
	unsigned long     flags;
	unsigned long     jiffies_now;
	long              time_diff;
	long		  timeout;

	spin_lock_irqsave(&(smi_info->si_lock), flags);
	debug_timestamp("Timer");

	jiffies_now = jiffies;
	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
		     * SI_USEC_PER_JIFFY);
	smi_result = smi_event_handler(smi_info, time_diff);

	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
		smi_inc_stat(smi_info, long_timeouts);
		goto do_mod_timer;
	}

	/*
	 * If the state machine asks for a short delay, then shorten
	 * the timer timeout.
	 */
	if (smi_result == SI_SM_CALL_WITH_DELAY) {
		smi_inc_stat(smi_info, short_timeouts);
		timeout = jiffies + 1;
	} else {
		smi_inc_stat(smi_info, long_timeouts);
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
	}

do_mod_timer:
	if (smi_result != SI_SM_IDLE)
		smi_mod_timer(smi_info, timeout);
	else
		smi_info->timer_running = false;
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}

irqreturn_t ipmi_si_irq_handler(int irq, void *data)
{
	struct smi_info *smi_info = data;
	unsigned long   flags;

	if (smi_info->io.si_type == SI_BT)
		/* We need to clear the IRQ flag for the BT interface. */
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
				     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	smi_inc_stat(smi_info, interrupts);

	debug_timestamp("Interrupt");

	smi_event_handler(smi_info, 0);
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
	return IRQ_HANDLED;
}

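/*
 * Called by the message handler once the interface is registered:
 * start the timer, claim the interrupt if one is configured, and
 * start the kipmid thread when it is needed (or forced on).
 */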
static int smi_start_processing(void       *send_info,
				ipmi_smi_t intf)
{
	struct smi_info *new_smi = send_info;
	int             enable = 0;

	new_smi->intf = intf;

	/* Set up the timer that drives the interface. */
	setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);

	/* Try to claim any interrupts. */
	if (new_smi->io.irq_setup) {
		new_smi->io.irq_handler_data = new_smi;
		new_smi->io.irq_setup(&new_smi->io);
	}

	/*
	 * Check if the user forcefully enabled the daemon.
	 */
	if (new_smi->intf_num < num_force_kipmid)
		enable = force_kipmid[new_smi->intf_num];
	/*
	 * The BT interface is efficient enough to not need a thread,
	 * and there is no need for a thread if we have interrupts.
	 */
	else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
		enable = 1;

	if (enable) {
		new_smi->thread = kthread_run(ipmi_thread, new_smi,
					      "kipmi%d", new_smi->intf_num);
		if (IS_ERR(new_smi->thread)) {
			dev_notice(new_smi->io.dev, "Could not start"
				   " kernel thread due to error %ld, only using"
				   " timers to drive the interface\n",
				   PTR_ERR(new_smi->thread));
			new_smi->thread = NULL;
		}
	}

	return 0;
}

static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
	struct smi_info *smi = send_info;

	data->addr_src = smi->io.addr_source;
	data->dev = smi->io.dev;
	data->addr_info = smi->io.addr_info;
	get_device(smi->io.dev);

	return 0;
}

static void set_maintenance_mode(void *send_info, bool enable)
{
	struct smi_info   *smi_info = send_info;

	if (!enable)
		atomic_set(&smi_info->req_events, 0);
}

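/* The lower-layer handlers handed to the message handler at registration. */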
static const struct ipmi_smi_handlers handlers = {
	.owner                  = THIS_MODULE,
	.start_processing       = smi_start_processing,
	.get_smi_info		= get_smi_info,
	.sender			= sender,
	.request_events		= request_events,
	.set_need_watch		= set_need_watch,
	.set_maintenance_mode   = set_maintenance_mode,
	.set_run_to_completion  = set_run_to_completion,
	.flush_messages		= flush_messages,
	.poll			= poll,
};

static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */

#ifdef CONFIG_PCI
static bool          si_trypci = true;
#endif

static const char * const addr_space_to_str[] = { "i/o", "mem" };

#ifdef CONFIG_PCI
module_param_named(trypci, si_trypci, bool, 0);
MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
		 " default scan of the interfaces identified via pci");
#endif
module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
		 " disabled(0).  Normally the IPMI driver auto-detects"
		 " this, but the value may be overridden by this parm.");
module_param(unload_when_empty, bool, 0);
MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
		 " specified or found, default is 1.  Setting to 0"
		 " is useful for hot add of devices using hotmod.");
module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
MODULE_PARM_DESC(kipmid_max_busy_us,
		 "Max time (in microseconds) to busy-wait for IPMI data before"
		 " sleeping. 0 (default) means to wait forever. Set to 100-500"
		 " if kipmid is using up a lot of CPU time.");

void ipmi_irq_finish_setup(struct si_sm_io *io)
{
	if (io->si_type == SI_BT)
		/* Enable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG,
			    IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
}

void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
	if (io->si_type == SI_BT)
		/* Disable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}

static void std_irq_cleanup(struct si_sm_io *io)
{
	ipmi_irq_start_cleanup(io);
	free_irq(io->irq, io->irq_handler_data);
}

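/*
 * Claim a (possibly shared) interrupt for the interface.  If the
 * request fails, fall back to running polled.
 */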
int ipmi_std_irq_setup(struct si_sm_io *io)
{
	int rv;

	if (!io->irq)
		return 0;

	rv = request_irq(io->irq,
			 ipmi_si_irq_handler,
			 IRQF_SHARED,
			 DEVICE_NAME,
			 io->irq_handler_data);
	if (rv) {
		dev_warn(io->dev, "%s unable to claim interrupt %d,"
			 " running polled\n",
			 DEVICE_NAME, io->irq);
		io->irq = 0;
	} else {
		io->irq_cleanup = std_irq_cleanup;
		ipmi_irq_finish_setup(io);
		dev_info(io->dev, "Using irq %d\n", io->irq);
	}

	return rv;
}

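/*
 * I/O port accessors.  regspacing is the distance between successive
 * registers, regsize the width of each access, and regshift how far
 * the 8 significant bits sit within the value read or written.
 */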
static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset)
{
	unsigned int addr = io->addr_data;

	return inb(addr + (offset * io->regspacing));
}

static void port_outb(const struct si_sm_io *io, unsigned int offset,
		      unsigned char b)
{
	unsigned int addr = io->addr_data;

	outb(b, addr + (offset * io->regspacing));
}

static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset)
{
	unsigned int addr = io->addr_data;

	return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}

static void port_outw(const struct si_sm_io *io, unsigned int offset,
		      unsigned char b)
{
	unsigned int addr = io->addr_data;

	outw(b << io->regshift, addr + (offset * io->regspacing));
}

static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset)
{
	unsigned int addr = io->addr_data;

	return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}

static void port_outl(const struct si_sm_io *io, unsigned int offset,
		      unsigned char b)
{
	unsigned int addr = io->addr_data;

	outl(b << io->regshift, addr+(offset * io->regspacing));
}

static void port_cleanup(struct si_sm_io *io)
{
	unsigned int addr = io->addr_data;
	int          idx;

	if (addr) {
		for (idx = 0; idx < io->io_size; idx++)
			release_region(addr + idx * io->regspacing,
				       io->regsize);
	}
}

static int port_setup(struct si_sm_io *io)
{
	unsigned int addr = io->addr_data;
	int          idx;

	if (!addr)
		return -ENODEV;

	io->io_cleanup = port_cleanup;

	/*
	 * Figure out the actual inb/inw/inl/etc routine to use based
	 * upon the register size.
	 */
	switch (io->regsize) {
	case 1:
		io->inputb = port_inb;
		io->outputb = port_outb;
		break;
	case 2:
		io->inputb = port_inw;
		io->outputb = port_outw;
		break;
	case 4:
		io->inputb = port_inl;
		io->outputb = port_outl;
		break;
	default:
		dev_warn(io->dev, "Invalid register size: %d\n",
			 io->regsize);
		return -EINVAL;
	}

	/*
	 * Some BIOSes reserve disjoint I/O regions in their ACPI
	 * tables.  This causes problems when trying to register the
	 * entire I/O region.  Therefore we must register each I/O
	 * port separately.
	 */
	for (idx = 0; idx < io->io_size; idx++) {
		if (request_region(addr + idx * io->regspacing,
				   io->regsize, DEVICE_NAME) == NULL) {
			/* Undo allocations */
			while (idx--)
				release_region(addr + idx * io->regspacing,
					       io->regsize);
			return -EIO;
		}
	}
	return 0;
}

static unsigned char intf_mem_inb(const struct si_sm_io *io,
				  unsigned int offset)
{
	return readb((io->addr)+(offset * io->regspacing));
}

static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset,
			  unsigned char b)
{
	writeb(b, (io->addr)+(offset * io->regspacing));
}

static unsigned char intf_mem_inw(const struct si_sm_io *io,
				  unsigned int offset)
{
	return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
		& 0xff;
}

static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset,
			  unsigned char b)
{
	writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
}

static unsigned char intf_mem_inl(const struct si_sm_io *io,
				  unsigned int offset)
{
	return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
		& 0xff;
}

static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset,
			  unsigned char b)
{
	writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
}

#ifdef readq
static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset)
{
	return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
		& 0xff;
}

static void mem_outq(const struct si_sm_io *io, unsigned int offset,
		     unsigned char b)
{
	writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
#endif

static void mem_region_cleanup(struct si_sm_io *io, int num)
{
	unsigned long addr = io->addr_data;
	int idx;

	for (idx = 0; idx < num; idx++)
		release_mem_region(addr + idx * io->regspacing,
				   io->regsize);
}

static void mem_cleanup(struct si_sm_io *io)
{
	if (io->addr) {
		iounmap(io->addr);
		mem_region_cleanup(io, io->io_size);
	}
}

static int mem_setup(struct si_sm_io *io)
{
	unsigned long addr = io->addr_data;
	int           mapsize, idx;

	if (!addr)
		return -ENODEV;

	io->io_cleanup = mem_cleanup;

	/*
	 * Figure out the actual readb/readw/readl/etc routine to use based
	 * upon the register size.
	 */
	switch (io->regsize) {
	case 1:
		io->inputb = intf_mem_inb;
		io->outputb = intf_mem_outb;
		break;
	case 2:
		io->inputb = intf_mem_inw;
		io->outputb = intf_mem_outw;
		break;
	case 4:
		io->inputb = intf_mem_inl;
		io->outputb = intf_mem_outl;
		break;
#ifdef readq
	case 8:
		io->inputb = mem_inq;
		io->outputb = mem_outq;
		break;
#endif
	default:
		dev_warn(io->dev, "Invalid register size: %d\n",
			 io->regsize);
		return -EINVAL;
	}

	/*
	 * Some BIOSes reserve disjoint memory regions in their ACPI
	 * tables.  This causes problems when trying to request the
	 * entire region.  Therefore we must request each register
	 * separately.
	 */
	for (idx = 0; idx < io->io_size; idx++) {
		if (request_mem_region(addr + idx * io->regspacing,
				       io->regsize, DEVICE_NAME) == NULL) {
			/* Undo allocations */
			mem_region_cleanup(io, idx);
			return -EIO;
		}
	}

	/*
	 * Calculate the total amount of memory to claim.  This is an
	 * unusual looking calculation, but it avoids claiming any
	 * more memory than it has to.  It will claim everything
	 * from the first address to the end of the last full
	 * register.
	 */
	mapsize = ((io->io_size * io->regspacing)
		   - (io->regspacing - io->regsize));
	io->addr = ioremap(addr, mapsize);
	if (io->addr == NULL) {
		mem_region_cleanup(io, io->io_size);
		return -EIO;
	}
	return 0;
}

static struct smi_info *smi_info_alloc(void)
{
	struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (info)
		spin_lock_init(&info->si_lock);
	return info;
}

#ifdef CONFIG_PCI

#define PCI_ERMC_CLASSCODE		0x0C0700
#define PCI_ERMC_CLASSCODE_MASK		0xffffff00
#define PCI_ERMC_CLASSCODE_TYPE_MASK	0xff
#define PCI_ERMC_CLASSCODE_TYPE_SMIC	0x00
#define PCI_ERMC_CLASSCODE_TYPE_KCS	0x01
#define PCI_ERMC_CLASSCODE_TYPE_BT	0x02

#define PCI_HP_VENDOR_ID    0x103C
#define PCI_MMC_DEVICE_ID   0x121A
#define PCI_MMC_ADDR_CW     0x10

static void ipmi_pci_cleanup(struct si_sm_io *io)
{
	struct pci_dev *pdev = io->addr_source_data;

	pci_disable_device(pdev);
}

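/*
 * For KCS interfaces, probe the register spacing by writing an invalid
 * command at 1-, 4-, and 16-byte spacings and checking whether the
 * status register reports anything back.
 */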
static int ipmi_pci_probe_regspacing(struct si_sm_io *io)
{
	if (io->si_type == SI_KCS) {
		unsigned char	status;
		int		regspacing;

		io->regsize = DEFAULT_REGSIZE;
		io->regshift = 0;

		/* detect 1, 4, 16byte spacing */
		for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
			io->regspacing = regspacing;
			if (io->io_setup(io)) {
				dev_err(io->dev,
					"Could not setup I/O space\n");
				return DEFAULT_REGSPACING;
			}
			/* write invalid cmd */
			io->outputb(io, 1, 0x10);
			/* read status back */
			status = io->inputb(io, 1);
			io->io_cleanup(io);
			if (status)
				return regspacing;
			regspacing *= 4;
		}
	}
	return DEFAULT_REGSPACING;
}

static int ipmi_pci_probe(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int rv;
	int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
	struct si_sm_io io;

	memset(&io, 0, sizeof(io));
	io.addr_source = SI_PCI;
	dev_info(&pdev->dev, "probing via PCI");

	switch (class_type) {
	case PCI_ERMC_CLASSCODE_TYPE_SMIC:
		io.si_type = SI_SMIC;
		break;

	case PCI_ERMC_CLASSCODE_TYPE_KCS:
		io.si_type = SI_KCS;
		break;

	case PCI_ERMC_CLASSCODE_TYPE_BT:
		io.si_type = SI_BT;
		break;

	default:
		dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
		return -ENOMEM;
	}

	rv = pci_enable_device(pdev);
	if (rv) {
		dev_err(&pdev->dev, "couldn't enable PCI device\n");
		return rv;
	}

	io.addr_source_cleanup = ipmi_pci_cleanup;
	io.addr_source_data = pdev;

	if (pci_resource_flags(pdev, 0) & IORESOURCE_IO)
		io.addr_type = IPMI_IO_ADDR_SPACE;
	else
		io.addr_type = IPMI_MEM_ADDR_SPACE;
	io.addr_data = pci_resource_start(pdev, 0);

1691 1692 1693
	io.regspacing = ipmi_pci_probe_regspacing(&io);
	io.regsize = DEFAULT_REGSIZE;
	io.regshift = 0;

	io.irq = pdev->irq;
	if (io.irq)
		io.irq_setup = ipmi_std_irq_setup;

	io.dev = &pdev->dev;

	dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
		&pdev->resource[0], io.regsize, io.regspacing, io.irq);

	rv = ipmi_si_add_smi(&io);
	if (rv)
		pci_disable_device(pdev);

	return rv;
}

static void ipmi_pci_remove(struct pci_dev *pdev)
{
	ipmi_si_remove_by_dev(&pdev->dev);
}

static const struct pci_device_id ipmi_pci_devices[] = {
	{ PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
	{ PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);

static struct pci_driver ipmi_pci_driver = {
	.name =         DEVICE_NAME,
	.id_table =     ipmi_pci_devices,
	.probe =        ipmi_pci_probe,
	.remove =       ipmi_pci_remove,
};
#endif /* CONFIG_PCI */

#ifdef CONFIG_PARISC
static int __init ipmi_parisc_probe(struct parisc_device *dev)
{
	struct si_sm_io io;

	memset(&io, 0, sizeof(io));

	io.si_type	= SI_KCS;
	io.addr_source	= SI_DEVICETREE;
	io.addr_type	= IPMI_MEM_ADDR_SPACE;
	io.addr_data	= dev->hpa.start;
	io.regsize	= 1;
	io.regspacing	= 1;
	io.regshift	= 0;
	io.irq		= 0; /* no interrupt */
	io.irq_setup	= NULL;
	io.dev		= &dev->dev;

	dev_dbg(&dev->dev, "addr 0x%lx\n", io.addr_data);

	return ipmi_si_add_smi(&io);
}

static int __exit ipmi_parisc_remove(struct parisc_device *dev)
{
	return ipmi_si_remove_by_dev(&dev->dev);
}

static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = {
	{ HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(parisc, ipmi_parisc_tbl);

static struct parisc_driver ipmi_parisc_driver __refdata = {
	.name =		"ipmi",
	.id_table =	ipmi_parisc_tbl,
	.probe =	ipmi_parisc_probe,
	.remove =	__exit_p(ipmi_parisc_remove),
};
#endif /* CONFIG_PARISC */

static int wait_for_msg_done(struct smi_info *smi_info)
{
	enum si_sm_result     smi_result;

	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
	for (;;) {
		if (smi_result == SI_SM_CALL_WITH_DELAY ||
		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
			schedule_timeout_uninterruptible(1);
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, jiffies_to_usecs(1));
		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, 0);
		} else
			break;
	}
	if (smi_result == SI_SM_HOSED)
		/*
		 * We couldn't get the state machine to run, so whatever's at
		 * the port is probably not an IPMI SMI interface.
		 */
		return -ENODEV;

	return 0;
}

static int try_get_dev_id(struct smi_info *smi_info)
{
	unsigned char         msg[2];
	unsigned char         *resp;
	unsigned long         resp_len;
	int                   rv = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/*
	 * Do a Get Device ID command, since it comes back with some
	 * useful info.
	 */
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv)
		goto out;

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	/* Check and record info from the get device id, in case we need it. */
	rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
			resp + 2, resp_len - 2, &smi_info->device_id);

out:
	kfree(resp);
	return rv;
}

static int get_global_enables(struct smi_info *smi_info, u8 *enables)
{
	unsigned char         msg[3];
	unsigned char         *resp;
	unsigned long         resp_len;
	int                   rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from get global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);
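	/*
	 * Expected reply layout: resp[0] = netfn/LUN, resp[1] = command,
	 * resp[2] = completion code (0 on success), resp[3] = the global
	 * enables byte, which is what the checks below look at.
	 */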

	if (resp_len < 4 ||
			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
			resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
			resp[2] != 0) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from get global enables command: %ld %x %x %x\n",
			 resp_len, resp[0], resp[1], resp[2]);
		rv = -EINVAL;
		goto out;
	} else {
		*enables = resp[3];
	}

out:
	kfree(resp);
	return rv;
}

/*
 * Returns 1 if the BMC returns an error completion code for the command.
 */
static int set_global_enables(struct smi_info *smi_info, u8 enables)
{
	unsigned char         msg[3];
	unsigned char         *resp;
	unsigned long         resp_len;
	int                   rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = enables;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from set global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
			resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from set global enables command: %ld %x %x\n",
			 resp_len, resp[0], resp[1]);
		rv = -EINVAL;
		goto out;
	}

	if (resp[2] != 0)
		rv = 1;

out:
	kfree(resp);
	return rv;
}

/*
 * Some BMCs do not support clearing the receive irq bit in the global
 * enables (even if they don't support interrupts on the BMC).  Check
 * for this and handle it properly.
 */
static void check_clr_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
			/* Already clear, should work ok. */
			return;

		enables &= ~IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check clearing the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * An error when clearing the receive irq bit means
		 * clearing the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
	}
}

/*
 * Some BMCs do not support setting the interrupt bits in the global
 * enables even if they support interrupts.  Clearly bad, but we can
 * compensate.
 */
static void check_set_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	if (!smi_info->io.irq)
		return;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		enables |= IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check setting the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * An error when setting the receive irq bit means
		 * setting the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
		smi_info->irq_enable_broken = true;
	}
}

static int try_enable_event_buffer(struct smi_info *smi_info)
{
	unsigned char         msg[3];
	unsigned char         *resp;
	unsigned long         resp_len;
	int                   rv = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn(PFX "Error getting response from get global enables command, the event buffer is not enabled.\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 4 ||
			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
			resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
			resp[2] != 0) {
		pr_warn(PFX "Invalid return from get global enables command, cannot enable the event buffer.\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
		/* buffer is already enabled, nothing to do. */
		smi_info->supports_event_msg_buff = true;
		goto out;
	}

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
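	/* Keep the enable bits we just read back; only add the event buffer bit. */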
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn(PFX "Error getting response from set global enables command, the event buffer is not enabled.\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
			resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		pr_warn(PFX "Invalid return from set global enables command, cannot enable the event buffer.\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[2] != 0)
		/*
		 * An error when setting the event buffer bit means
		 * that the event buffer is not supported.
		 */
		rv = -ENOENT;
	else
		smi_info->supports_event_msg_buff = true;

out:
	kfree(resp);
	return rv;
}

static int smi_type_proc_show(struct seq_file *m, void *v)
{
	struct smi_info *smi = m->private;

	seq_printf(m, "%s\n", si_to_str[smi->io.si_type]);

	return 0;
}

static int smi_type_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_type_proc_show, PDE_DATA(inode));
}

static const struct file_operations smi_type_proc_ops = {
	.open		= smi_type_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int smi_si_stats_proc_show(struct seq_file *m, void *v)
{
	struct smi_info *smi = m->private;

	seq_printf(m, "interrupts_enabled:    %d\n",
2101
		       smi->io.irq && !smi->interrupt_disabled);
2102
	seq_printf(m, "short_timeouts:        %u\n",
2103
		       smi_get_stat(smi, short_timeouts));
2104
	seq_printf(m, "long_timeouts:         %u\n",
2105
		       smi_get_stat(smi, long_timeouts));
2106
	seq_printf(m, "idles:                 %u\n",
2107
		       smi_get_stat(smi, idles));
2108
	seq_printf(m, "interrupts:            %u\n",
2109
		       smi_get_stat(smi, interrupts));
2110
	seq_printf(m, "attentions:            %u\n",
2111
		       smi_get_stat(smi, attentions));
2112
	seq_printf(m, "flag_fetches:          %u\n",
2113
		       smi_get_stat(smi, flag_fetches));
2114
	seq_printf(m, "hosed_count:           %u\n",
2115
		       smi_get_stat(smi, hosed_count));
2116
	seq_printf(m, "complete_transactions: %u\n",
2117
		       smi_get_stat(smi, complete_transactions));
2118
	seq_printf(m, "events:                %u\n",
2119
		       smi_get_stat(smi, events));
2120
	seq_printf(m, "watchdog_pretimeouts:  %u\n",
2121
		       smi_get_stat(smi, watchdog_pretimeouts));
2122
	seq_printf(m, "incoming_messages:     %u\n",
2123
		       smi_get_stat(smi, incoming_messages));
2124 2125
	return 0;
}

static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_si_stats_proc_show, PDE_DATA(inode));
}

static const struct file_operations smi_si_stats_proc_ops = {
	.open		= smi_si_stats_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int smi_params_proc_show(struct seq_file *m, void *v)
{
	struct smi_info *smi = m->private;

	seq_printf(m,
		   "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
		   si_to_str[smi->io.si_type],
		   addr_space_to_str[smi->io.addr_type],
		   smi->io.addr_data,
		   smi->io.regspacing,
		   smi->io.regsize,
		   smi->io.regshift,
		   smi->io.irq,
		   smi->io.slave_addr);
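	/*
	 * Example of the resulting line (values are illustrative only):
	 *   kcs,i/o,0xca2,rsp=1,rsi=1,rsh=0,irq=0,ipmb=32
	 */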

	return 0;
}

static int smi_params_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_params_proc_show, PDE_DATA(inode));
}

static const struct file_operations smi_params_proc_ops = {
	.open		= smi_params_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * oem_data_avail_to_receive_msg_avail
 * @info - smi_info structure with msg_flags set
 *
 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
 * Returns 1 indicating need to re-run handle_flags().
 */
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
{
	smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
			       RECEIVE_MSG_AVAIL);
	return 1;
}

/*
 * setup_dell_poweredge_oem_data_handler
 * @info - smi_info.device_id must be populated
 *
 * Systems that match, but have firmware version < 1.40 may assert
 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
 * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
 * as RECEIVE_MSG_AVAIL instead.
 *
 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
 * assert the OEM[012] bits, and if it did, the driver would have to
 * change to handle that properly, we don't actually check for the
 * firmware version.
 * Device ID = 0x20                BMC on PowerEdge 8G servers
 * Device Revision = 0x80
 * Firmware Revision1 = 0x01       BMC version 1.40
 * Firmware Revision2 = 0x40       BCD encoded
 * IPMI Version = 0x51             IPMI 1.5
 * Manufacturer ID = A2 02 00      Dell IANA
 *
 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
 *
 */
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
#define DELL_IANA_MFR_ID 0x0002a2
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
{
	struct ipmi_device_id *id = &smi_info->device_id;
	if (id->manufacturer_id == DELL_IANA_MFR_ID) {
		if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
		    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
		    id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		} else if (ipmi_version_major(id) < 1 ||
			   (ipmi_version_major(id) == 1 &&
			    ipmi_version_minor(id) < 5)) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		}
	}
}

#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
static void return_hosed_msg_badsize(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	/* Make it a response */
	msg->rsp[0] = msg->data[0] | 4;
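	/* (bit 2 turns the request netfn into the matching response netfn) */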
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
	msg->rsp_size = 3;
	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}

/*
 * dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be populated
 *
 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
 * not respond to a Get SDR command if the length of the data
 * requested is exactly 0x3A, which leads to command timeouts and no
 * data returned.  This intercepts such commands, and causes userspace
 * callers to try again with a different-sized buffer, which succeeds.
 */

#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
					     unsigned long unused,
					     void *in)
{
	struct smi_info *smi_info = in;
	unsigned char *data = smi_info->curr_msg->data;
	unsigned int size   = smi_info->curr_msg->data_size;
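
	/*
	 * Per the IPMI Get SDR request layout, data[0]/data[1] carry the
	 * netfn and command and data[7] is the "bytes to read" count, the
	 * field that triggers the 0x3A problem described above.
	 */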
	if (size >= 8 &&
	    (data[0]>>2) == STORAGE_NETFN &&
	    data[1] == STORAGE_CMD_GET_SDR &&
	    data[7] == 0x3A) {
		return_hosed_msg_badsize(smi_info);
		return NOTIFY_STOP;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dell_poweredge_bt_xaction_notifier = {
	.notifier_call	= dell_poweredge_bt_xaction_handler,
};

/*
 * setup_dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Registers the BT transaction notifier when we know the BMC is a
 * Dell PowerEdge with a BT interface.
 */
static void
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
	struct ipmi_device_id *id = &smi_info->device_id;
	if (id->manufacturer_id == DELL_IANA_MFR_ID &&
	    smi_info->io.si_type == SI_BT)
		register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}

/*
 * setup_oem_data_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.oem_data_avail_handler
 * when we know what function to use there.
 */

static void setup_oem_data_handler(struct smi_info *smi_info)
{
	setup_dell_poweredge_oem_data_handler(smi_info);
}

static void setup_xaction_handlers(struct smi_info *smi_info)
{
	setup_dell_poweredge_bt_xaction_handler(smi_info);
}

static void check_for_broken_irqs(struct smi_info *smi_info)
{
	check_clr_rcv_irq(smi_info);
	check_set_rcv_irq(smi_info);
}

static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
{
	if (smi_info->thread != NULL)
		kthread_stop(smi_info->thread);
	if (smi_info->timer_running)
		del_timer_sync(&smi_info->si_timer);
}

static struct smi_info *find_dup_si(struct smi_info *info)
{
	struct smi_info *e;

	list_for_each_entry(e, &smi_infos, link) {
		if (e->io.addr_type != info->io.addr_type)
			continue;
		if (e->io.addr_data == info->io.addr_data) {
			/*
			 * This is a cheap hack, ACPI doesn't have a defined
			 * slave address but SMBIOS does.  Pick it up from
			 * any source that has it available.
			 */
			if (info->io.slave_addr && !e->io.slave_addr)
				e->io.slave_addr = info->io.slave_addr;
			return e;
		}
	}

	return NULL;
}

int ipmi_si_add_smi(struct si_sm_io *io)
{
	int rv = 0;
	struct smi_info *new_smi, *dup;

	if (!io->io_setup) {
		if (io->addr_type == IPMI_IO_ADDR_SPACE) {
			io->io_setup = port_setup;
		} else if (io->addr_type == IPMI_MEM_ADDR_SPACE) {
			io->io_setup = mem_setup;
		} else {
			return -EINVAL;
		}
	}

	new_smi = smi_info_alloc();
	if (!new_smi)
		return -ENOMEM;

	new_smi->io = *io;

	mutex_lock(&smi_infos_lock);
	dup = find_dup_si(new_smi);
	if (dup) {
		if (new_smi->io.addr_source == SI_ACPI &&
		    dup->io.addr_source == SI_SMBIOS) {
			/* We prefer ACPI over SMBIOS. */
			dev_info(dup->io.dev,
				 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
				 si_to_str[new_smi->io.si_type]);
			cleanup_one_si(dup);
		} else {
			dev_info(new_smi->io.dev,
				 "%s-specified %s state machine: duplicate\n",
				 ipmi_addr_src_to_str(new_smi->io.addr_source),
				 si_to_str[new_smi->io.si_type]);
			rv = -EBUSY;
			goto out_err;
		}
	}

	pr_info(PFX "Adding %s-specified %s state machine\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_type]);

	/* So we know not to free it unless we have allocated one. */
	new_smi->intf = NULL;
	new_smi->si_sm = NULL;
	new_smi->handlers = NULL;

	list_add_tail(&new_smi->link, &smi_infos);

	if (initialized) {
		rv = try_smi_init(new_smi);
		if (rv) {
			mutex_unlock(&smi_infos_lock);
			cleanup_one_si(new_smi);
			return rv;
		}
	}
out_err:
	mutex_unlock(&smi_infos_lock);
	return rv;
}

/*
 * Try to start up an interface.  Must be called with smi_infos_lock
 * held, primarily to keep smi_num consistent; we only want to do these
 * one at a time.
 */
static int try_smi_init(struct smi_info *new_smi)
{
	int rv = 0;
	int i;
	char *init_name = NULL;

	pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_type],
		addr_space_to_str[new_smi->io.addr_type],
		new_smi->io.addr_data,
		new_smi->io.slave_addr, new_smi->io.irq);

	switch (new_smi->io.si_type) {
	case SI_KCS:
		new_smi->handlers = &kcs_smi_handlers;
		break;

	case SI_SMIC:
		new_smi->handlers = &smic_smi_handlers;
		break;

	case SI_BT:
		new_smi->handlers = &bt_smi_handlers;
		break;

	default:
		/* No support for anything else yet. */
		rv = -EIO;
		goto out_err;
	}

	new_smi->intf_num = smi_num;

	/* Do this early so it's available for logs. */
	if (!new_smi->io.dev) {
		init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d",
				      new_smi->intf_num);

		/*
		 * If we don't already have a device from something
		 * else (like PCI), then register a new one.
		 */
		new_smi->pdev = platform_device_alloc("ipmi_si",
						      new_smi->intf_num);
		if (!new_smi->pdev) {
			pr_err(PFX "Unable to allocate platform device\n");
			rv = -ENOMEM;
			goto out_err;
		}
		new_smi->io.dev = &new_smi->pdev->dev;
		new_smi->io.dev->driver = &ipmi_platform_driver.driver;
		/* Nulled by device_add() */
		new_smi->io.dev->init_name = init_name;
	}

	/* Allocate the state machine's data and initialize it. */
	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
	if (!new_smi->si_sm) {
		pr_err(PFX "Could not allocate state machine memory\n");
		rv = -ENOMEM;
		goto out_err;
	}
	new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
							   &new_smi->io);

	/* Now that we know the I/O size, we can set up the I/O. */
	rv = new_smi->io.io_setup(&new_smi->io);
	if (rv) {
		dev_err(new_smi->io.dev, "Could not set up I/O space\n");
		goto out_err;
	}

	/* Do low-level detection first. */
	if (new_smi->handlers->detect(new_smi->si_sm)) {
		if (new_smi->io.addr_source)
			dev_err(new_smi->io.dev,
				"Interface detection failed\n");
		rv = -ENODEV;
		goto out_err;
	}

	/*
	 * Attempt a get device id command.  If it fails, we probably
	 * don't have a BMC here.
	 */
	rv = try_get_dev_id(new_smi);
	if (rv) {
		if (new_smi->io.addr_source)
			dev_err(new_smi->io.dev,
			       "There appears to be no BMC at this location\n");
		goto out_err;
	}

	setup_oem_data_handler(new_smi);
	setup_xaction_handlers(new_smi);
	check_for_broken_irqs(new_smi);

	new_smi->waiting_msg = NULL;
	new_smi->curr_msg = NULL;
	atomic_set(&new_smi->req_events, 0);
	new_smi->run_to_completion = false;
	for (i = 0; i < SI_NUM_STATS; i++)
		atomic_set(&new_smi->stats[i], 0);

	new_smi->interrupt_disabled = true;
	atomic_set(&new_smi->need_watch, 0);

	rv = try_enable_event_buffer(new_smi);
	if (rv == 0)
		new_smi->has_event_buffer = true;

	/*
	 * Start clearing the flags before we enable interrupts or the
	 * timer to avoid racing with the timer.
	 */
	start_clear_flags(new_smi, false);

	/*
	 * IRQ is defined to be set when non-zero.  req_events will
	 * cause a global flags check that will enable interrupts.
	 */
	if (new_smi->io.irq) {
		new_smi->interrupt_disabled = false;
		atomic_set(&new_smi->req_events, 1);
	}

	if (new_smi->pdev) {
		rv = platform_device_add(new_smi->pdev);
		if (rv) {
			dev_err(new_smi->io.dev,
				"Unable to register system interface device: %d\n",
				rv);
			goto out_err;
		}
	}

	rv = ipmi_register_smi(&handlers,
			       new_smi,
			       new_smi->io.dev,
			       new_smi->io.slave_addr);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to register device: error %d\n",
			rv);
		goto out_err_stop_timer;
	}

	rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
				     &smi_type_proc_ops,
				     new_smi);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to create proc entry: %d\n", rv);
		goto out_err_stop_timer;
	}

	rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
				     &smi_si_stats_proc_ops,
				     new_smi);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to create proc entry: %d\n", rv);
		goto out_err_stop_timer;
	}

	rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
				     &smi_params_proc_ops,
				     new_smi);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to create proc entry: %d\n", rv);
		goto out_err_stop_timer;
	}

	/* Don't increment till we know we have succeeded. */
	smi_num++;

	dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
		 si_to_str[new_smi->io.si_type]);

	WARN_ON(new_smi->io.dev->init_name != NULL);
	kfree(init_name);

	return 0;

out_err_stop_timer:
	wait_for_timer_and_thread(new_smi);

out_err:
	new_smi->interrupt_disabled = true;

	if (new_smi->intf) {
		ipmi_smi_t intf = new_smi->intf;
		new_smi->intf = NULL;
		ipmi_unregister_smi(intf);
	}

	if (new_smi->io.irq_cleanup) {
		new_smi->io.irq_cleanup(&new_smi->io);
		new_smi->io.irq_cleanup = NULL;
	}

	/*
	 * Wait until we know that we are out of any interrupt
	 * handlers that might have been running before we freed the
	 * interrupt.
	 */
	synchronize_sched();

	if (new_smi->si_sm) {
		if (new_smi->handlers)
			new_smi->handlers->cleanup(new_smi->si_sm);
		kfree(new_smi->si_sm);
		new_smi->si_sm = NULL;
	}
	if (new_smi->io.addr_source_cleanup) {
		new_smi->io.addr_source_cleanup(&new_smi->io);
		new_smi->io.addr_source_cleanup = NULL;
	}
	if (new_smi->io.io_cleanup) {
		new_smi->io.io_cleanup(&new_smi->io);
		new_smi->io.io_cleanup = NULL;
	}

	if (new_smi->pdev) {
		platform_device_unregister(new_smi->pdev);
		new_smi->pdev = NULL;
	} else if (new_smi->pdev) {
		platform_device_put(new_smi->pdev);
	}

	kfree(init_name);

	return rv;
}

static int init_ipmi_si(void)
{
	int  rv;
	struct smi_info *e;
	enum ipmi_addr_src type = SI_INVALID;

	if (initialized)
		return 0;

	pr_info("IPMI System Interface driver.\n");
L
Linus Torvalds 已提交
2664

2665
	/* If the user gave us a device, they presumably want us to use it */
	if (!ipmi_si_hardcode_find_bmc())
		goto do_scan;

	ipmi_si_platform_init();

#ifdef CONFIG_PCI
	if (si_trypci) {
		rv = pci_register_driver(&ipmi_pci_driver);
		if (rv)
			pr_err(PFX "Unable to register PCI driver: %d\n", rv);
		else
			pci_registered = true;
	}
#endif

#ifdef CONFIG_PARISC
	register_parisc_driver(&ipmi_parisc_driver);
	parisc_registered = true;
#endif

	/* We prefer devices with interrupts, but in the case of a machine
	   with multiple BMCs we assume that there will be several instances
	   of a given type so if we succeed in registering a type then also
	   try to register everything else of the same type */
do_scan:
	mutex_lock(&smi_infos_lock);
	list_for_each_entry(e, &smi_infos, link) {
		/* Try to register a device if it has an IRQ and we either
		   haven't successfully registered a device yet or this
		   device has the same type as one we successfully registered */
		if (e->io.irq && (!type || e->io.addr_source == type)) {
			if (!try_smi_init(e)) {
				type = e->io.addr_source;
			}
		}
	}

	/* type will only have been set if we successfully registered an si */
	if (type)
		goto skip_fallback_noirq;

	/* Fall back to the preferred device */

	list_for_each_entry(e, &smi_infos, link) {
		if (!e->io.irq && (!type || e->io.addr_source == type)) {
			if (!try_smi_init(e)) {
				type = e->io.addr_source;
			}
		}
	}

skip_fallback_noirq:
	initialized = 1;
	mutex_unlock(&smi_infos_lock);

	if (type)
		return 0;

	mutex_lock(&smi_infos_lock);
	if (unload_when_empty && list_empty(&smi_infos)) {
		mutex_unlock(&smi_infos_lock);
		cleanup_ipmi_si();
		pr_warn(PFX "Unable to find any System Interface(s)\n");
		return -ENODEV;
	} else {
		mutex_unlock(&smi_infos_lock);
		return 0;
	}
}
module_init(init_ipmi_si);

static void cleanup_one_si(struct smi_info *to_clean)
{
	int           rv = 0;

	if (!to_clean)
		return;

	if (to_clean->intf) {
		ipmi_smi_t intf = to_clean->intf;

		to_clean->intf = NULL;
		rv = ipmi_unregister_smi(intf);
		if (rv) {
			pr_err(PFX "Unable to unregister device: errno=%d\n",
			       rv);
		}
	}

	list_del(&to_clean->link);

	/*
	 * Make sure that interrupts, the timer and the thread are
	 * stopped and will not run again.
	 */
	if (to_clean->io.irq_cleanup)
		to_clean->io.irq_cleanup(&to_clean->io);
	wait_for_timer_and_thread(to_clean);

	/*
	 * Timeouts are stopped, now make sure the interrupts are off
	 * in the BMC.  Note that timers and CPU interrupts are off,
	 * so no need for locks.
	 */
	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
		poll(to_clean);
		schedule_timeout_uninterruptible(1);
	}
	if (to_clean->handlers)
		disable_si_irq(to_clean, false);
	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
		poll(to_clean);
		schedule_timeout_uninterruptible(1);
	}

	if (to_clean->handlers)
		to_clean->handlers->cleanup(to_clean->si_sm);

	kfree(to_clean->si_sm);

	if (to_clean->io.addr_source_cleanup)
		to_clean->io.addr_source_cleanup(&to_clean->io);
	if (to_clean->io.io_cleanup)
		to_clean->io.io_cleanup(&to_clean->io);

	if (to_clean->pdev)
		platform_device_unregister(to_clean->pdev);

	kfree(to_clean);
}

int ipmi_si_remove_by_dev(struct device *dev)
{
	struct smi_info *e;
	int rv = -ENOENT;

	mutex_lock(&smi_infos_lock);
	list_for_each_entry(e, &smi_infos, link) {
		if (e->io.dev == dev) {
			cleanup_one_si(e);
			rv = 0;
			break;
		}
	}
	mutex_unlock(&smi_infos_lock);

	return rv;
}

void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
			    unsigned long addr)
{
	/* remove */
	struct smi_info *e, *tmp_e;

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
		if (e->io.addr_type != addr_space)
			continue;
		if (e->io.si_type != si_type)
			continue;
		if (e->io.addr_data == addr)
			cleanup_one_si(e);
	}
	mutex_unlock(&smi_infos_lock);
}

static void cleanup_ipmi_si(void)
{
	struct smi_info *e, *tmp_e;

	if (!initialized)
		return;

#ifdef CONFIG_PCI
	if (pci_registered)
		pci_unregister_driver(&ipmi_pci_driver);
#endif
#ifdef CONFIG_PARISC
	if (parisc_registered)
		unregister_parisc_driver(&ipmi_parisc_driver);
#endif

	ipmi_si_platform_shutdown();

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
		cleanup_one_si(e);
	mutex_unlock(&smi_infos_lock);
}
module_exit(cleanup_ipmi_si);

MODULE_ALIAS("platform:dmi-ipmi-si");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
		   " system interfaces.");