Commit 7b984477 authored by Lv Zheng, committed by Rafael J. Wysocki

ACPI / IPMI: Add reference counting for ACPI IPMI transfers

This patch adds reference counting for ACPI IPMI transfers to tune the
locking granularity of tx_msg_lock.

This patch also makes the whole acpi_ipmi module's coding style consistent
by using reference counting for all its objects (i.e., acpi_ipmi_device and
acpi_ipmi_msg).

The acpi_ipmi_msg handling is re-designed using reference counting (a
minimal sketch of the pattern follows this list):
 1. tx_msg is always unlinked before complete(), so that it is safe to put
    complete() outside of tx_msg_lock.
 2. tx_msg reference counters are incremented before calling
    ipmi_request_settime() and tx_msg_lock protection is added to
    ipmi_cancel_tx_msg() so that a complete() can be safely called in
    parallel with tx_msg unlinking in failure cases.
 3. tx_msg holds a reference to acpi_ipmi_device so that it can be flushed
    and freed in the contexts other than acpi_ipmi_space_handler().
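
A minimal sketch of the unlink-then-complete pattern (hypothetical names,
not the acpi_ipmi code itself; see the real diff below): the message is
unlinked while the spinlock is held, but complete() and the final
kref_put() run after the lock is dropped, so the completion's wait-queue
lock never nests inside the list lock.

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/completion.h>
	#include <linux/slab.h>

	struct my_msg {
		struct list_head head;
		struct completion done;
		struct kref kref;	/* one ref held by the list, one per waiter */
	};

	static void my_msg_release(struct kref *kref)
	{
		kfree(container_of(kref, struct my_msg, kref));
	}

	/* Flush: unlink under the lock, complete and drop the list's ref outside it. */
	static void my_msg_flush(spinlock_t *lock, struct list_head *list)
	{
		struct my_msg *msg;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		while (!list_empty(list)) {
			msg = list_first_entry(list, struct my_msg, head);
			list_del(&msg->head);
			spin_unlock_irqrestore(lock, flags);

			complete(&msg->done);	/* list lock is not held here */
			kref_put(&msg->kref, my_msg_release);

			spin_lock_irqsave(lock, flags);
		}
		spin_unlock_irqrestore(lock, flags);
	}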

The lockdep_chains output shows that all acpi_ipmi locks are leaf locks
after the tuning:
 1. ipmi_lock is always leaf:
    irq_context: 0
    [ffffffff81a943f8] smi_watchers_mutex
    [ffffffffa06eca60] driver_data.ipmi_lock
    irq_context: 0
    [ffffffff82767b40] &buffer->mutex
    [ffffffffa00a6678] s_active#103
    [ffffffffa06eca60] driver_data.ipmi_lock
 2. without this patch applied, lock used by complete() is held after
    holding tx_msg_lock:
    irq_context: 0
    [ffffffff82767b40] &buffer->mutex
    [ffffffffa00a6678] s_active#103
    [ffffffffa06ecce8] &(&ipmi_device->tx_msg_lock)->rlock
    irq_context: 1
    [ffffffffa06ecce8] &(&ipmi_device->tx_msg_lock)->rlock
    irq_context: 1
    [ffffffffa06ecce8] &(&ipmi_device->tx_msg_lock)->rlock
    [ffffffffa06eccf0] &x->wait#25
    irq_context: 1
    [ffffffffa06ecce8] &(&ipmi_device->tx_msg_lock)->rlock
    [ffffffffa06eccf0] &x->wait#25
    [ffffffff81e36620] &p->pi_lock
    irq_context: 1
    [ffffffffa06ecce8] &(&ipmi_device->tx_msg_lock)->rlock
    [ffffffffa06eccf0] &x->wait#25
    [ffffffff81e36620] &p->pi_lock
    [ffffffff81e5d0a8] &rq->lock
 3. with this patch applied, tx_msg_lock is always leaf:
    irq_context: 0
    [ffffffff82767b40] &buffer->mutex
    [ffffffffa00a66d8] s_active#107
    [ffffffffa07ecdc8] &(&ipmi_device->tx_msg_lock)->rlock
    irq_context: 1
    [ffffffffa07ecdc8] &(&ipmi_device->tx_msg_lock)->rlock
Signed-off-by: Lv Zheng <lv.zheng@intel.com>
Reviewed-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Parent: e96a94ed
@@ -104,6 +104,7 @@ struct acpi_ipmi_msg {
 	u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
 	u8 rx_len;
 	struct acpi_ipmi_device *device;
+	struct kref kref;
 };
 
 /* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
@@ -208,16 +209,20 @@ static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
 	kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
 }
 
-static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
+static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
 {
+	struct acpi_ipmi_device *ipmi;
 	struct acpi_ipmi_msg *ipmi_msg;
-	struct pnp_dev *pnp_dev = ipmi->pnp_dev;
 
+	ipmi = acpi_ipmi_dev_get();
+	if (!ipmi)
+		return NULL;
+
 	ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
 	if (!ipmi_msg) {
-		dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n");
+		acpi_ipmi_dev_put(ipmi);
 		return NULL;
 	}
 
+	kref_init(&ipmi_msg->kref);
 	init_completion(&ipmi_msg->tx_complete);
 	INIT_LIST_HEAD(&ipmi_msg->head);
 	ipmi_msg->device = ipmi;
@@ -225,6 +230,32 @@ static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
 	return ipmi_msg;
 }
 
+static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
+{
+	acpi_ipmi_dev_put(tx_msg->device);
+	kfree(tx_msg);
+}
+
+static void ipmi_msg_release_kref(struct kref *kref)
+{
+	struct acpi_ipmi_msg *tx_msg =
+		container_of(kref, struct acpi_ipmi_msg, kref);
+
+	ipmi_msg_release(tx_msg);
+}
+
+static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
+{
+	kref_get(&tx_msg->kref);
+	return tx_msg;
+}
+
+static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
+{
+	kref_put(&tx_msg->kref, ipmi_msg_release_kref);
+}
+
 #define IPMI_OP_RGN_NETFN(offset)	((offset >> 8) & 0xff)
 #define IPMI_OP_RGN_CMD(offset)		(offset & 0xff)
 static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
@@ -305,7 +336,7 @@ static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
 
 static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
 {
-	struct acpi_ipmi_msg *tx_msg, *temp;
+	struct acpi_ipmi_msg *tx_msg;
 	unsigned long flags;
 
 	/*
@@ -317,18 +348,47 @@ static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
 	 * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
 	 */
 	spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
-	list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
+	while (!list_empty(&ipmi->tx_msg_list)) {
+		tx_msg = list_first_entry(&ipmi->tx_msg_list,
+					  struct acpi_ipmi_msg,
+					  head);
+		list_del(&tx_msg->head);
+		spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+
 		/* wake up the sleep thread on the Tx msg */
 		complete(&tx_msg->tx_complete);
+		acpi_ipmi_msg_put(tx_msg);
+		spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+	}
+	spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+}
+
+static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
+			       struct acpi_ipmi_msg *msg)
+{
+	struct acpi_ipmi_msg *tx_msg, *temp;
+	bool msg_found = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+	list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
+		if (msg == tx_msg) {
+			msg_found = true;
+			list_del(&tx_msg->head);
+			break;
+		}
 	}
 	spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+
+	if (msg_found)
+		acpi_ipmi_msg_put(tx_msg);
 }
 
 static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 {
 	struct acpi_ipmi_device *ipmi_device = user_msg_data;
-	int msg_found = 0;
-	struct acpi_ipmi_msg *tx_msg;
+	bool msg_found = false;
+	struct acpi_ipmi_msg *tx_msg, *temp;
 	struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
 	unsigned long flags;
 
@@ -339,17 +399,19 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 		goto out_msg;
 	}
 
 	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
-	list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+	list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
 		if (msg->msgid == tx_msg->tx_msgid) {
-			msg_found = 1;
+			msg_found = true;
+			list_del(&tx_msg->head);
 			break;
 		}
 	}
+	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 
 	if (!msg_found) {
 		dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
			 "returned.\n", msg->msgid);
-		goto out_lock;
+		goto out_msg;
 	}
 
 	/* copy the response data to Rx_data buffer */
@@ -375,8 +437,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 		tx_msg->msg_done = ACPI_IPMI_OK;
 
 out_comp:
 	complete(&tx_msg->tx_complete);
-out_lock:
-	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+	acpi_ipmi_msg_put(tx_msg);
 out_msg:
 	ipmi_free_recv_msg(msg);
 };
@@ -491,26 +552,23 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
 	if ((function & ACPI_IO_MASK) == ACPI_READ)
 		return AE_TYPE;
 
-	ipmi_device = acpi_ipmi_dev_get();
-	if (!ipmi_device)
+	tx_msg = ipmi_msg_alloc();
+	if (!tx_msg)
 		return AE_NOT_EXIST;
-
-	tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
-	if (!tx_msg) {
-		status = AE_NO_MEMORY;
-		goto out_ref;
-	}
+	ipmi_device = tx_msg->device;
 
 	if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
-		status = AE_TYPE;
-		goto out_msg;
+		ipmi_msg_release(tx_msg);
+		return AE_TYPE;
 	}
 
+	acpi_ipmi_msg_get(tx_msg);
 	mutex_lock(&driver_data.ipmi_lock);
 	/* Do not add a tx_msg that can not be flushed. */
 	if (ipmi_device->dead) {
-		status = AE_NOT_EXIST;
 		mutex_unlock(&driver_data.ipmi_lock);
-		goto out_msg;
+		ipmi_msg_release(tx_msg);
+		return AE_NOT_EXIST;
 	}
 
 	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
@@ -523,20 +581,15 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
 			       NULL, 0, 0, IPMI_TIMEOUT);
 	if (err) {
 		status = AE_ERROR;
-		goto out_list;
+		goto out_msg;
 	}
 	wait_for_completion(&tx_msg->tx_complete);
 	acpi_format_ipmi_response(tx_msg, value);
 	status = AE_OK;
 
-out_list:
-	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
-	list_del(&tx_msg->head);
-	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 out_msg:
-	kfree(tx_msg);
-out_ref:
-	acpi_ipmi_dev_put(ipmi_device);
+	ipmi_cancel_tx_msg(ipmi_device, tx_msg);
+	acpi_ipmi_msg_put(tx_msg);
 	return status;
 }