Commit a6df590d authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
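
The diff below converts locking in the mthca and SRP InfiniBand drivers from `struct semaphore` to the `struct mutex` API: locks used purely for mutual exclusion (`hcr_sem`, `mcg_table.sem`, `cap_mask_mutex`, the ICM/doorbell table locks, and the SRP `target_mutex`) become mutexes, while `cmd.poll_sem` and `cmd.event_sem` remain semaphores and only lose their interruptible acquisition. The snippet below is a minimal sketch of the conversion pattern applied throughout; the `my_dev` and `my_op` names are illustrative only and do not appear in the drivers.

/*
 * Sketch of the semaphore-to-mutex conversion pattern used in this diff.
 * "my_dev" and "my_op" are made-up names for illustration only.
 */
#include <linux/mutex.h>

struct my_dev {
	struct mutex lock;		/* was: struct semaphore sem; */
};

static void my_dev_init(struct my_dev *dev)
{
	mutex_init(&dev->lock);		/* was: init_MUTEX(&dev->sem); */
}

static int my_op(struct my_dev *dev)
{
	/*
	 * Both mutex_lock_interruptible() and the old down_interruptible()
	 * return nonzero if a signal arrives before the lock is acquired.
	 */
	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;

	/* ... critical section ... */

	mutex_unlock(&dev->lock);	/* was: up(&dev->sem); */
	return 0;
}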

@@ -199,8 +199,7 @@ static int mthca_cmd_post(struct mthca_dev *dev,
 {
 	int err = 0;
 
-	if (down_interruptible(&dev->cmd.hcr_sem))
-		return -EINTR;
+	mutex_lock(&dev->cmd.hcr_mutex);
 
 	if (event) {
 		unsigned long end = jiffies + GO_BIT_TIMEOUT;
@@ -238,7 +237,7 @@ static int mthca_cmd_post(struct mthca_dev *dev,
 					       op), dev->hcr + 6 * 4);
 
 out:
-	up(&dev->cmd.hcr_sem);
+	mutex_unlock(&dev->cmd.hcr_mutex);
 	return err;
 }
 
@@ -255,8 +254,7 @@ static int mthca_cmd_poll(struct mthca_dev *dev,
 	int err = 0;
 	unsigned long end;
 
-	if (down_interruptible(&dev->cmd.poll_sem))
-		return -EINTR;
+	down(&dev->cmd.poll_sem);
 
 	err = mthca_cmd_post(dev, in_param,
 			     out_param ? *out_param : 0,
@@ -333,8 +331,7 @@ static int mthca_cmd_wait(struct mthca_dev *dev,
 	int err = 0;
 	struct mthca_cmd_context *context;
 
-	if (down_interruptible(&dev->cmd.event_sem))
-		return -EINTR;
+	down(&dev->cmd.event_sem);
 
 	spin_lock(&dev->cmd.context_lock);
 	BUG_ON(dev->cmd.free_head < 0);
@@ -438,7 +435,7 @@ static int mthca_cmd_imm(struct mthca_dev *dev,
 
 int mthca_cmd_init(struct mthca_dev *dev)
 {
-	sema_init(&dev->cmd.hcr_sem, 1);
+	mutex_init(&dev->cmd.hcr_mutex);
 	sema_init(&dev->cmd.poll_sem, 1);
 	dev->cmd.use_events = 0;
 
......
@@ -44,6 +44,8 @@
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/timer.h>
+#include <linux/mutex.h>
+
 #include <asm/semaphore.h>
 
 #include "mthca_provider.h"
@@ -111,7 +113,7 @@ enum {
 struct mthca_cmd {
 	struct pci_pool  *pool;
 	int               use_events;
-	struct semaphore  hcr_sem;
+	struct mutex      hcr_mutex;
 	struct semaphore  poll_sem;
 	struct semaphore  event_sem;
 	int               max_cmds;
@@ -256,7 +258,7 @@ struct mthca_av_table {
 };
 
 struct mthca_mcg_table {
-	struct semaphore        sem;
+	struct mutex            mutex;
 	struct mthca_alloc      alloc;
 	struct mthca_icm_table *table;
 };
@@ -301,7 +303,7 @@ struct mthca_dev {
 	u64              ddr_end;
 
 	MTHCA_DECLARE_DOORBELL_LOCK(doorbell_lock)
-	struct semaphore cap_mask_mutex;
+	struct mutex     cap_mask_mutex;
 
 	void __iomem    *hcr;
 	void __iomem    *kar;
......
@@ -155,6 +155,13 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
 		return -ENODEV;
 	}
 
+	if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
+		mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than "
+			  "PCI resource 2 size of 0x%lx, aborting.\n",
+			  dev_lim->uar_size, pci_resource_len(mdev->pdev, 2));
+		return -ENODEV;
+	}
+
 	mdev->limits.num_ports = dev_lim->num_ports;
 	mdev->limits.vl_cap    = dev_lim->max_vl;
 	mdev->limits.mtu_cap   = dev_lim->max_mtu;
@@ -976,8 +983,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
 		err = -ENODEV;
 		goto err_disable_pdev;
 	}
-	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM) ||
-	    pci_resource_len(pdev, 2) != 1 << 23) {
+	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
 		err = -ENODEV;
 		goto err_disable_pdev;
......
@@ -154,10 +154,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		return PTR_ERR(mailbox);
 	mgm = mailbox->buf;
 
-	if (down_interruptible(&dev->mcg_table.sem)) {
-		err = -EINTR;
-		goto err_sem;
-	}
+	mutex_lock(&dev->mcg_table.mutex);
 
 	err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
 	if (err)
@@ -241,8 +238,8 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		BUG_ON(index < dev->limits.num_mgms);
 		mthca_free(&dev->mcg_table.alloc, index);
 	}
-	up(&dev->mcg_table.sem);
-err_sem:
+	mutex_unlock(&dev->mcg_table.mutex);
+
 	mthca_free_mailbox(dev, mailbox);
 	return err;
 }
@@ -263,10 +260,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		return PTR_ERR(mailbox);
 	mgm = mailbox->buf;
 
-	if (down_interruptible(&dev->mcg_table.sem)) {
-		err = -EINTR;
-		goto err_sem;
-	}
+	mutex_lock(&dev->mcg_table.mutex);
 
 	err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
 	if (err)
@@ -371,8 +365,8 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	}
 
 out:
-	up(&dev->mcg_table.sem);
-err_sem:
+	mutex_unlock(&dev->mcg_table.mutex);
+
 	mthca_free_mailbox(dev, mailbox);
 	return err;
 }
@@ -389,7 +383,7 @@ int __devinit mthca_init_mcg_table(struct mthca_dev *dev)
 	if (err)
 		return err;
 
-	init_MUTEX(&dev->mcg_table.sem);
+	mutex_init(&dev->mcg_table.mutex);
 
 	return 0;
 }
......
@@ -50,7 +50,7 @@ enum {
 };
 
 struct mthca_user_db_table {
-	struct semaphore mutex;
+	struct mutex     mutex;
 	struct {
 		u64                uvirt;
 		struct scatterlist mem;
@@ -158,7 +158,7 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
 	int ret = 0;
 	u8 status;
 
-	down(&table->mutex);
+	mutex_lock(&table->mutex);
 
 	if (table->icm[i]) {
 		++table->icm[i]->refcount;
@@ -184,7 +184,7 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
 	++table->icm[i]->refcount;
 
 out:
-	up(&table->mutex);
+	mutex_unlock(&table->mutex);
 	return ret;
 }
 
@@ -198,7 +198,7 @@ void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int o
 
 	i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
 
-	down(&table->mutex);
+	mutex_lock(&table->mutex);
 
 	if (--table->icm[i]->refcount == 0) {
 		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
@@ -207,7 +207,7 @@ void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int o
 		table->icm[i] = NULL;
 	}
 
-	up(&table->mutex);
+	mutex_unlock(&table->mutex);
 }
 
 void *mthca_table_find(struct mthca_icm_table *table, int obj)
@@ -220,7 +220,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj)
 	if (!table->lowmem)
 		return NULL;
 
-	down(&table->mutex);
+	mutex_lock(&table->mutex);
 
 	idx = (obj & (table->num_obj - 1)) * table->obj_size;
 	icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
}
out:
up(&table->mutex);
mutex_unlock(&table->mutex);
return page ? lowmem_page_address(page) + offset : NULL;
}
@@ -301,7 +301,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
 	table->num_obj  = nobj;
 	table->obj_size = obj_size;
 	table->lowmem   = use_lowmem;
-	init_MUTEX(&table->mutex);
+	mutex_init(&table->mutex);
 
 	for (i = 0; i < num_icm; ++i)
 		table->icm[i] = NULL;
@@ -380,7 +380,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 	if (index < 0 || index > dev->uar_table.uarc_size / 8)
 		return -EINVAL;
 
-	down(&db_tab->mutex);
+	mutex_lock(&db_tab->mutex);
 
 	i = index / MTHCA_DB_REC_PER_PAGE;
 
@@ -424,7 +424,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 	db_tab->page[i].refcount = 1;
 
 out:
-	up(&db_tab->mutex);
+	mutex_unlock(&db_tab->mutex);
 	return ret;
 }
 
@@ -439,11 +439,11 @@ void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 	 * pages until we clean up the whole db table.
 	 */
 
-	down(&db_tab->mutex);
+	mutex_lock(&db_tab->mutex);
 
 	--db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;
 
-	up(&db_tab->mutex);
+	mutex_unlock(&db_tab->mutex);
 }
 
 struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
@@ -460,7 +460,7 @@ struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
 	if (!db_tab)
 		return ERR_PTR(-ENOMEM);
 
-	init_MUTEX(&db_tab->mutex);
+	mutex_init(&db_tab->mutex);
 	for (i = 0; i < npages; ++i) {
 		db_tab->page[i].refcount = 0;
 		db_tab->page[i].uvirt    = 0;
@@ -499,7 +499,7 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
 	int ret = 0;
 	u8 status;
 
-	down(&dev->db_tab->mutex);
+	mutex_lock(&dev->db_tab->mutex);
 
 	switch (type) {
 	case MTHCA_DB_TYPE_CQ_ARM:
@@ -585,7 +585,7 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
 	*db = (__be32 *) &page->db_rec[j];
 
 out:
-	up(&dev->db_tab->mutex);
+	mutex_unlock(&dev->db_tab->mutex);
 
 	return ret;
 }
@@ -601,7 +601,7 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
 
 	page = dev->db_tab->page + i;
 
-	down(&dev->db_tab->mutex);
+	mutex_lock(&dev->db_tab->mutex);
 
 	page->db_rec[j] = 0;
 	if (i >= dev->db_tab->min_group2)
@@ -624,7 +624,7 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
 		++dev->db_tab->min_group2;
 	}
 
-	up(&dev->db_tab->mutex);
+	mutex_unlock(&dev->db_tab->mutex);
 }
 
 int mthca_init_db_tab(struct mthca_dev *dev)
@@ -638,7 +638,7 @@ int mthca_init_db_tab(struct mthca_dev *dev)
 	if (!dev->db_tab)
 		return -ENOMEM;
 
-	init_MUTEX(&dev->db_tab->mutex);
+	mutex_init(&dev->db_tab->mutex);
 
 	dev->db_tab->npages     = dev->uar_table.uarc_size / 4096;
 	dev->db_tab->max_group1 = 0;
......
@@ -39,8 +39,7 @@
 
 #include <linux/list.h>
 #include <linux/pci.h>
-
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 
 #define MTHCA_ICM_CHUNK_LEN \
 	((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \
@@ -64,7 +63,7 @@ struct mthca_icm_table {
 	int               num_obj;
 	int               obj_size;
 	int               lowmem;
-	struct semaphore  mutex;
+	struct mutex      mutex;
 	struct mthca_icm *icm[0];
 };
 
@@ -147,7 +146,7 @@ struct mthca_db_table {
 	int                   max_group1;
 	int                   min_group2;
 	struct mthca_db_page *page;
-	struct semaphore      mutex;
+	struct mutex          mutex;
 };
 
 enum mthca_db_type {
......
@@ -185,7 +185,7 @@ static int mthca_modify_port(struct ib_device *ibdev,
 	int err;
 	u8 status;
 
-	if (down_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
+	if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
 		return -ERESTARTSYS;
 
 	err = mthca_query_port(ibdev, port, &attr);
@@ -207,7 +207,7 @@ static int mthca_modify_port(struct ib_device *ibdev,
 	}
 
 out:
-	up(&to_mdev(ibdev)->cap_mask_mutex);
+	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
 	return err;
 }
 
@@ -1185,7 +1185,7 @@ int mthca_register_device(struct mthca_dev *dev)
 		dev->ib_dev.post_recv    = mthca_tavor_post_receive;
 	}
 
-	init_MUTEX(&dev->cap_mask_mutex);
+	mutex_init(&dev->cap_mask_mutex);
 
 	ret = ib_register_device(&dev->ib_dev);
 	if (ret)
......
@@ -357,9 +357,9 @@ static void srp_remove_work(void *target_ptr)
 	target->state = SRP_TARGET_REMOVED;
 	spin_unlock_irq(target->scsi_host->host_lock);
 
-	down(&target->srp_host->target_mutex);
+	mutex_lock(&target->srp_host->target_mutex);
 	list_del(&target->list);
-	up(&target->srp_host->target_mutex);
+	mutex_unlock(&target->srp_host->target_mutex);
 
 	scsi_remove_host(target->scsi_host);
 	ib_destroy_cm_id(target->cm_id);
@@ -1254,9 +1254,9 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
 	if (scsi_add_host(target->scsi_host, host->dev->dma_device))
 		return -ENODEV;
 
-	down(&host->target_mutex);
+	mutex_lock(&host->target_mutex);
 	list_add_tail(&target->list, &host->target_list);
-	up(&host->target_mutex);
+	mutex_unlock(&host->target_mutex);
 
 	target->state = SRP_TARGET_LIVE;
 
@@ -1525,7 +1525,7 @@ static struct srp_host *srp_add_port(struct ib_device *device, u8 port)
 		return NULL;
 
 	INIT_LIST_HEAD(&host->target_list);
-	init_MUTEX(&host->target_mutex);
+	mutex_init(&host->target_mutex);
 	init_completion(&host->released);
 	host->dev  = device;
 	host->port = port;
@@ -1626,7 +1626,7 @@ static void srp_remove_one(struct ib_device *device)
 		 * Mark all target ports as removed, so we stop queueing
 		 * commands and don't try to reconnect.
 		 */
-		down(&host->target_mutex);
+		mutex_lock(&host->target_mutex);
 		list_for_each_entry_safe(target, tmp_target,
 					 &host->target_list, list) {
 			spin_lock_irqsave(target->scsi_host->host_lock, flags);
@@ -1634,7 +1634,7 @@ static void srp_remove_one(struct ib_device *device)
 			target->state = SRP_TARGET_REMOVED;
 			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 		}
-		up(&host->target_mutex);
+		mutex_unlock(&host->target_mutex);
 
 		/*
 		 * Wait for any reconnection tasks that may have
......
@@ -37,8 +37,7 @@
 
 #include <linux/types.h>
 #include <linux/list.h>
-
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
@@ -85,7 +84,7 @@ struct srp_host {
 	struct ib_mr           *mr;
 	struct class_device     class_dev;
 	struct list_head        target_list;
-	struct semaphore        target_mutex;
+	struct mutex            target_mutex;
 	struct completion       released;
 	struct list_head        list;
 };
......