/*
 *  acpi_ipmi.c - ACPI IPMI opregion
 *
 *  Copyright (C) 2010, 2013 Intel Corporation
 *    Author: Zhao Yakui <yakui.zhao@intel.com>
 *            Lv Zheng <lv.zheng@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/ipmi.h>
#include <linux/device.h>
#include <linux/pnp.h>
#include <linux/spinlock.h>

MODULE_AUTHOR("Zhao Yakui");
MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
MODULE_LICENSE("GPL");


#define ACPI_IPMI_OK			0
#define ACPI_IPMI_TIMEOUT		0x10
#define ACPI_IPMI_UNKNOWN		0x07
/* the IPMI timeout is 5s */
#define IPMI_TIMEOUT			(5000)
#define ACPI_IPMI_MAX_MSG_LENGTH	64

struct acpi_ipmi_device {
	/* the device list attached to driver_data.ipmi_devices */
	struct list_head head;
	/* the IPMI request message list */
	struct list_head tx_msg_list;
62
	spinlock_t	tx_msg_lock;
63 64 65 66 67 68
	acpi_handle handle;
	struct pnp_dev *pnp_dev;
	ipmi_user_t	user_interface;
	int ipmi_ifnum; /* IPMI interface number */
	long curr_msgid;
	struct ipmi_smi_info smi_data;
69 70
	bool dead;
	struct kref kref;
71 72 73 74 75 76 77
};

/* Global driver state: device list, SMI watcher, IPMI user handlers. */
struct ipmi_driver_data {
	struct list_head	ipmi_devices;
	struct ipmi_smi_watcher	bmc_events;
	struct ipmi_user_hndl	ipmi_hndlrs;
	/* protects ipmi_devices, selected_smi and each device's dead flag */
	struct mutex		ipmi_lock;
	/*
	 * NOTE: IPMI System Interface Selection
	 * There is no system interface specified by the IPMI operation
	 * region access.  We try to select one system interface with ACPI
	 * handle set.  IPMI messages passed from the ACPI codes are sent
	 * to this selected global IPMI system interface.
	 */
	struct acpi_ipmi_device *selected_smi;
};

struct acpi_ipmi_msg {
	struct list_head head;
	/*
	 * General speaking the addr type should be SI_ADDR_TYPE. And
	 * the addr channel should be BMC.
	 * In fact it can also be IPMB type. But we will have to
	 * parse it from the Netfn command buffer. It is so complex
	 * that it is skipped.
	 */
	struct ipmi_addr addr;
	long tx_msgid;
	/* it is used to track whether the IPMI message is finished */
	struct completion tx_complete;
	struct kernel_ipmi_msg tx_message;
	int	msg_done;
103 104 105
	/* tx/rx data . And copy it from/to ACPI object buffer */
	u8	data[ACPI_IPMI_MAX_MSG_LENGTH];
	u8	rx_len;
106
	struct acpi_ipmi_device *device;
107
	struct kref	kref;
108 109 110 111 112 113
};

/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
struct acpi_ipmi_buffer {
	u8 status;
	u8 length;
114
	u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132
};

static void ipmi_register_bmc(int iface, struct device *dev);
static void ipmi_bmc_gone(int iface);
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);

/*
 * Singleton driver state.  ipmi_lock is initialized later in
 * acpi_ipmi_init(); the SMI watcher and receive handler callbacks are
 * wired up here at build time.
 */
static struct ipmi_driver_data driver_data = {
	.ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
	.bmc_events = {
		.owner = THIS_MODULE,
		.new_smi = ipmi_register_bmc,
		.smi_gone = ipmi_bmc_gone,
	},
	.ipmi_hndlrs = {
		.ipmi_recv_hndl = ipmi_msg_handler,
	},
};

/*
 * Allocate and initialize an acpi_ipmi_device for interface @iface.
 * Takes a reference on smi_data->dev (dropped again on failure) and
 * creates the IPMI user.  Returns NULL on any failure.
 */
static struct acpi_ipmi_device *
ipmi_dev_alloc(int iface, struct ipmi_smi_info *smi_data, acpi_handle handle)
{
	struct acpi_ipmi_device *dev;
	ipmi_user_t user;
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	kref_init(&dev->kref);
	INIT_LIST_HEAD(&dev->head);
	INIT_LIST_HEAD(&dev->tx_msg_list);
	spin_lock_init(&dev->tx_msg_lock);
	dev->handle = handle;
	/* hold a reference on the underlying device for smi_data.dev */
	dev->pnp_dev = to_pnp_dev(get_device(smi_data->dev));
	memcpy(&dev->smi_data, smi_data, sizeof(struct ipmi_smi_info));
	dev->ipmi_ifnum = iface;

	rc = ipmi_create_user(iface, &driver_data.ipmi_hndlrs, dev, &user);
	if (rc) {
		put_device(smi_data->dev);
		kfree(dev);
		return NULL;
	}
	dev->user_interface = user;

	return dev;
}

/*
 * Final teardown of an acpi_ipmi_device: destroy the IPMI user, drop the
 * device reference taken in ipmi_dev_alloc(), and free the structure.
 */
static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
{
	ipmi_destroy_user(ipmi_device->user_interface);
	put_device(ipmi_device->smi_data.dev);
	kfree(ipmi_device);
}

/* kref release callback; invoked when the last reference is dropped. */
static void ipmi_dev_release_kref(struct kref *kref)
{
	struct acpi_ipmi_device *ipmi =
		container_of(kref, struct acpi_ipmi_device, kref);

	ipmi_dev_release(ipmi);
}

static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
{
	list_del(&ipmi_device->head);
184 185
	if (driver_data.selected_smi == ipmi_device)
		driver_data.selected_smi = NULL;
186 187 188 189 190 191 192
	/*
	 * Always setting dead flag after deleting from the list or
	 * list_for_each_entry() codes must get changed.
	 */
	ipmi_device->dead = true;
}

193
static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
194
{
195
	struct acpi_ipmi_device *ipmi_device = NULL;
196 197

	mutex_lock(&driver_data.ipmi_lock);
198 199 200
	if (driver_data.selected_smi) {
		ipmi_device = driver_data.selected_smi;
		kref_get(&ipmi_device->kref);
201 202 203 204 205 206 207 208 209 210 211
	}
	mutex_unlock(&driver_data.ipmi_lock);

	return ipmi_device;
}

/* Drop a device reference; frees the device when the count hits zero. */
static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
{
	kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
}

212
static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
213
{
214
	struct acpi_ipmi_device *ipmi;
215 216
	struct acpi_ipmi_msg *ipmi_msg;

217 218 219
	ipmi = acpi_ipmi_dev_get();
	if (!ipmi)
		return NULL;
220
	ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
221 222
	if (!ipmi_msg) {
		acpi_ipmi_dev_put(ipmi);
223 224
		return NULL;
	}
225
	kref_init(&ipmi_msg->kref);
226 227 228
	init_completion(&ipmi_msg->tx_complete);
	INIT_LIST_HEAD(&ipmi_msg->head);
	ipmi_msg->device = ipmi;
229
	ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;
230 231 232
	return ipmi_msg;
}

/* Free a tx message and drop its reference on the owning device. */
static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
{
	acpi_ipmi_dev_put(tx_msg->device);
	kfree(tx_msg);
}

/* kref release callback for acpi_ipmi_msg. */
static void ipmi_msg_release_kref(struct kref *kref)
{
	struct acpi_ipmi_msg *tx_msg =
		container_of(kref, struct acpi_ipmi_msg, kref);

	ipmi_msg_release(tx_msg);
}

/* Take an extra reference on @tx_msg and return it for convenience. */
static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
{
	kref_get(&tx_msg->kref);

	return tx_msg;
}

/* Drop a message reference; frees the message when the count hits zero. */
static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
{
	kref_put(&tx_msg->kref, ipmi_msg_release_kref);
}

/*
 * The IPMI network function and command are encoded in the opregion
 * address; see ACPI 4.0, sec 5.5.2.4.3.
 */
#define		IPMI_OP_RGN_NETFN(offset)	((offset >> 8) & 0xff)
#define		IPMI_OP_RGN_CMD(offset)		(offset & 0xff)
static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
262 263 264 265 266 267
				acpi_physical_address address,
				acpi_integer *value)
{
	struct kernel_ipmi_msg *msg;
	struct acpi_ipmi_buffer *buffer;
	struct acpi_ipmi_device *device;
268
	unsigned long flags;
269 270 271 272 273 274 275 276

	msg = &tx_msg->tx_message;
	/*
	 * IPMI network function and command are encoded in the address
	 * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
	 */
	msg->netfn = IPMI_OP_RGN_NETFN(address);
	msg->cmd = IPMI_OP_RGN_CMD(address);
277
	msg->data = tx_msg->data;
278 279 280 281 282 283
	/*
	 * value is the parameter passed by the IPMI opregion space handler.
	 * It points to the IPMI request message buffer
	 */
	buffer = (struct acpi_ipmi_buffer *)value;
	/* copy the tx message data */
284 285 286 287 288 289
	if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
		dev_WARN_ONCE(&tx_msg->device->pnp_dev->dev, true,
			      "Unexpected request (msg len %d).\n",
			      buffer->length);
		return -EINVAL;
	}
290
	msg->data_len = buffer->length;
291
	memcpy(tx_msg->data, buffer->data, msg->data_len);
292 293 294 295 296 297 298 299 300 301 302 303 304
	/*
	 * now the default type is SYSTEM_INTERFACE and channel type is BMC.
	 * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
	 * the addr type should be changed to IPMB. Then we will have to parse
	 * the IPMI request message buffer to get the IPMB address.
	 * If so, please fix me.
	 */
	tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	tx_msg->addr.channel = IPMI_BMC_CHANNEL;
	tx_msg->addr.data[0] = 0;

	/* Get the msgid */
	device = tx_msg->device;
305
	spin_lock_irqsave(&device->tx_msg_lock, flags);
306 307
	device->curr_msgid++;
	tx_msg->tx_msgid = device->curr_msgid;
308
	spin_unlock_irqrestore(&device->tx_msg_lock, flags);
309
	return 0;
310 311 312
}

static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
313
		acpi_integer *value)
314 315 316 317 318 319 320 321 322
{
	struct acpi_ipmi_buffer *buffer;

	/*
	 * value is also used as output parameter. It represents the response
	 * IPMI message returned by IPMI command.
	 */
	buffer = (struct acpi_ipmi_buffer *)value;
	/*
323 324
	 * If the flag of msg_done is not set, it means that the IPMI command is
	 * not executed correctly.
325
	 */
326 327
	buffer->status = msg->msg_done;
	if (msg->msg_done != ACPI_IPMI_OK)
328 329 330 331 332 333
		return;
	/*
	 * If the IPMI response message is obtained correctly, the status code
	 * will be ACPI_IPMI_OK
	 */
	buffer->length = msg->rx_len;
334
	memcpy(buffer->data, msg->data, msg->rx_len);
335 336 337 338
}

static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
{
339
	struct acpi_ipmi_msg *tx_msg;
340
	unsigned long flags;
341

342 343 344 345 346 347 348 349
	/*
	 * NOTE: On-going ipmi_recv_msg
	 * ipmi_msg_handler() may still be invoked by ipmi_si after
	 * flushing.  But it is safe to do a fast flushing on module_exit()
	 * without waiting for all ipmi_recv_msg(s) to complete from
	 * ipmi_msg_handler() as it is ensured by ipmi_si that all
	 * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
	 */
350
	spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
351 352 353 354 355 356 357
	while (!list_empty(&ipmi->tx_msg_list)) {
		tx_msg = list_first_entry(&ipmi->tx_msg_list,
					  struct acpi_ipmi_msg,
					  head);
		list_del(&tx_msg->head);
		spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);

358 359
		/* wake up the sleep thread on the Tx msg */
		complete(&tx_msg->tx_complete);
360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379
		acpi_ipmi_msg_put(tx_msg);
		spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
	}
	spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
}

/*
 * Remove @msg from @ipmi's tx_msg_list if it is still queued, dropping
 * the list's reference.  No-op if the response handler already removed it.
 */
static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
			       struct acpi_ipmi_msg *msg)
{
	struct acpi_ipmi_msg *tx_msg, *temp;
	bool msg_found = false;
	unsigned long flags;

	spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
	list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
		if (msg == tx_msg) {
			msg_found = true;
			list_del(&tx_msg->head);
			break;
		}
	}
	spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);

	/* drop the reference the list held, outside the spinlock */
	if (msg_found)
		acpi_ipmi_msg_put(tx_msg);
}

static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
	struct acpi_ipmi_device *ipmi_device = user_msg_data;
390 391
	bool msg_found = false;
	struct acpi_ipmi_msg *tx_msg, *temp;
392
	struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
393
	unsigned long flags;
394 395 396 397 398

	if (msg->user != ipmi_device->user_interface) {
		dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
			"returned user %p, expected user %p\n",
			msg->user, ipmi_device->user_interface);
399
		goto out_msg;
400
	}
401
	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
402
	list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
403
		if (msg->msgid == tx_msg->tx_msgid) {
404 405
			msg_found = true;
			list_del(&tx_msg->head);
406 407 408
			break;
		}
	}
409
	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
410 411 412 413

	if (!msg_found) {
		dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
			"returned.\n", msg->msgid);
414
		goto out_msg;
415 416
	}

417 418 419 420 421
	/* copy the response data to Rx_data buffer */
	if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
		dev_WARN_ONCE(&pnp_dev->dev, true,
			      "Unexpected response (msg len %d).\n",
			      msg->msg.data_len);
422
		goto out_comp;
423
	}
424 425 426 427 428 429 430 431 432 433 434 435 436 437 438
	/* response msg is an error msg */
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE &&
	    msg->msg.data_len == 1) {
		if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
			dev_WARN_ONCE(&pnp_dev->dev, true,
				      "Unexpected response (timeout).\n");
			tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
		}
		goto out_comp;
	}
	tx_msg->rx_len = msg->msg.data_len;
	memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
	tx_msg->msg_done = ACPI_IPMI_OK;
out_comp:
439
	complete(&tx_msg->tx_complete);
440
	acpi_ipmi_msg_put(tx_msg);
441
out_msg:
442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457
	ipmi_free_recv_msg(msg);
};

static void ipmi_register_bmc(int iface, struct device *dev)
{
	struct acpi_ipmi_device *ipmi_device, *temp;
	struct pnp_dev *pnp_dev;
	int err;
	struct ipmi_smi_info smi_data;
	acpi_handle handle;

	err = ipmi_get_smi_info(iface, &smi_data);

	if (err)
		return;

458 459
	if (smi_data.addr_src != SI_ACPI)
		goto err_ref;
460
	handle = smi_data.addr_info.acpi_info.acpi_handle;
461 462 463 464 465 466 467 468 469
	if (!handle)
		goto err_ref;
	pnp_dev = to_pnp_dev(smi_data.dev);

	ipmi_device = ipmi_dev_alloc(iface, &smi_data, handle);
	if (!ipmi_device) {
		dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
		goto err_ref;
	}
470 471 472 473 474 475 476 477

	mutex_lock(&driver_data.ipmi_lock);
	list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
		/*
		 * if the corresponding ACPI handle is already added
		 * to the device list, don't add it again.
		 */
		if (temp->handle == handle)
478
			goto err_lock;
479 480
	}

481 482
	if (!driver_data.selected_smi)
		driver_data.selected_smi = ipmi_device;
483
	list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
484
	mutex_unlock(&driver_data.ipmi_lock);
485
	put_device(smi_data.dev);
486 487
	return;

488
err_lock:
489
	mutex_unlock(&driver_data.ipmi_lock);
490 491
	ipmi_dev_release(ipmi_device);
err_ref:
492 493 494 495 496 497 498
	put_device(smi_data.dev);
	return;
}

static void ipmi_bmc_gone(int iface)
{
	struct acpi_ipmi_device *ipmi_device, *temp;
499
	bool dev_found = false;
500 501 502 503

	mutex_lock(&driver_data.ipmi_lock);
	list_for_each_entry_safe(ipmi_device, temp,
				&driver_data.ipmi_devices, head) {
504 505 506 507 508
		if (ipmi_device->ipmi_ifnum != iface) {
			dev_found = true;
			__ipmi_dev_kill(ipmi_device);
			break;
		}
509
	}
510 511 512 513
	if (!driver_data.selected_smi)
		driver_data.selected_smi = list_first_entry_or_null(
					&driver_data.ipmi_devices,
					struct acpi_ipmi_device, head);
514
	mutex_unlock(&driver_data.ipmi_lock);
515 516 517 518
	if (dev_found) {
		ipmi_flush_tx_msg(ipmi_device);
		acpi_ipmi_dev_put(ipmi_device);
	}
519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541
}
/* --------------------------------------------------------------------------
 *			Address Space Management
 * -------------------------------------------------------------------------- */
/*
 * This is the IPMI opregion space handler.
 * @function: indicates the read/write. In fact as the IPMI message is driven
 * by command, only write is meaningful.
 * @address: This contains the netfn/command of IPMI request message.
 * @bits   : not used.
 * @value  : it is an in/out parameter. It points to the IPMI message buffer.
 *	     Before the IPMI message is sent, it represents the actual request
 *	     IPMI message. After the IPMI message is finished, it represents
 *	     the response IPMI message returned by IPMI command.
 * @handler_context: IPMI device context.
 */

/*
 * IPMI opregion space handler (registered on ACPI_ROOT_OBJECT).
 * Builds an IPMI request from the opregion write, submits it, waits for
 * the response and writes it back into @value.  Read accesses are
 * rejected with AE_TYPE since IPMI opregions are command-driven.
 */
static acpi_status
acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, acpi_integer *value,
		      void *handler_context, void *region_context)
{
	struct acpi_ipmi_msg *tx_msg;
	struct acpi_ipmi_device *ipmi_device;
	int err;
	acpi_status status;
	unsigned long flags;

	/*
	 * IPMI opregion message.
	 * IPMI message is firstly written to the BMC and system software
	 * can get the response. So it is meaningless for the read access
	 * of IPMI opregion.
	 */
	if ((function & ACPI_IO_MASK) == ACPI_READ)
		return AE_TYPE;

	tx_msg = ipmi_msg_alloc();
	if (!tx_msg)
		return AE_NOT_EXIST;
	ipmi_device = tx_msg->device;

	if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
		ipmi_msg_release(tx_msg);
		return AE_TYPE;
	}

	/* extra reference held by the tx_msg_list while queued */
	acpi_ipmi_msg_get(tx_msg);
	mutex_lock(&driver_data.ipmi_lock);
	/* Do not add a tx_msg that can not be flushed. */
	if (ipmi_device->dead) {
		mutex_unlock(&driver_data.ipmi_lock);
		ipmi_msg_release(tx_msg);
		return AE_NOT_EXIST;
	}
	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
	mutex_unlock(&driver_data.ipmi_lock);

	err = ipmi_request_settime(ipmi_device->user_interface,
					&tx_msg->addr,
					tx_msg->tx_msgid,
					&tx_msg->tx_message,
					NULL, 0, 0, IPMI_TIMEOUT);
	if (err) {
		status = AE_ERROR;
		goto out_msg;
	}
	wait_for_completion(&tx_msg->tx_complete);
	acpi_format_ipmi_response(tx_msg, value);
	status = AE_OK;

out_msg:
	/* dequeue if still listed (error/flush path), then drop our ref */
	ipmi_cancel_tx_msg(ipmi_device, tx_msg);
	acpi_ipmi_msg_put(tx_msg);
	return status;
}

/*
 * Module init: install the IPMI opregion handler and register the SMI
 * watcher.  If the watcher registration fails, the opregion handler is
 * removed again so it is not left installed with no backing interfaces
 * (the original code leaked the handler registration on this path).
 */
static int __init acpi_ipmi_init(void)
{
	int result = 0;
	acpi_status status;

	if (acpi_disabled)
		return result;

	mutex_init(&driver_data.ipmi_lock);

	status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
				ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler,
				NULL, NULL);
	if (ACPI_FAILURE(status)) {
		pr_warn("Can't register IPMI opregion space handle\n");
		return -EINVAL;
	}

	result = ipmi_smi_watcher_register(&driver_data.bmc_events);
	if (result) {
		pr_err("Can't register IPMI system interface watcher\n");
		acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
				ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);
	}

	return result;
}

static void __exit acpi_ipmi_exit(void)
{
622
	struct acpi_ipmi_device *ipmi_device;
623 624 625 626 627 628 629 630 631 632 633 634 635

	if (acpi_disabled)
		return;

	ipmi_smi_watcher_unregister(&driver_data.bmc_events);

	/*
	 * When one smi_watcher is unregistered, it is only deleted
	 * from the smi_watcher list. But the smi_gone callback function
	 * is not called. So explicitly uninstall the ACPI IPMI oregion
	 * handler and free it.
	 */
	mutex_lock(&driver_data.ipmi_lock);
636 637 638 639 640 641 642 643 644 645 646
	while (!list_empty(&driver_data.ipmi_devices)) {
		ipmi_device = list_first_entry(&driver_data.ipmi_devices,
					       struct acpi_ipmi_device,
					       head);
		__ipmi_dev_kill(ipmi_device);
		mutex_unlock(&driver_data.ipmi_lock);

		ipmi_flush_tx_msg(ipmi_device);
		acpi_ipmi_dev_put(ipmi_device);

		mutex_lock(&driver_data.ipmi_lock);
647 648
	}
	mutex_unlock(&driver_data.ipmi_lock);
649 650
	acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
				ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);
651 652 653 654
}

module_init(acpi_ipmi_init);
module_exit(acpi_ipmi_exit);