Commit 0a6cb34f authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 - stack corruption fix for pseries hwrng driver
 - add missing DMA unmap in caam crypto driver
 - fix NUMA crash in qat crypto driver
 - fix buggy mapping of zero-length associated data in qat crypto driver

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  hwrng: pseries - port to new read API and fix stack corruption
  crypto: caam - fix missing dma unmap on error path
  crypto: qat - Enforce valid numa configuration
  crypto: qat - Prevent dma mapping zero length assoc data
@@ -25,18 +25,21 @@
 #include <asm/vio.h>
-static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
+static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 {
+	u64 buffer[PLPAR_HCALL_BUFSIZE];
+	size_t size = max < 8 ? max : 8;
 	int rc;
-	rc = plpar_hcall(H_RANDOM, (unsigned long *)data);
+	rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
 	if (rc != H_SUCCESS) {
 		pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
 		return -EIO;
 	}
+	memcpy(data, buffer, size);
 	/* The hypervisor interface returns 64 bits */
-	return 8;
+	return size;
 }
 /**
@@ -55,7 +58,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
 static struct hwrng pseries_rng = {
 	.name		= KBUILD_MODNAME,
-	.data_read	= pseries_rng_data_read,
+	.read		= pseries_rng_read,
 };
 static int __init pseries_rng_probe(struct vio_dev *dev,
......
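Note on the fix above: plpar_hcall() always stores PLPAR_HCALL_BUFSIZE (4) doublewords, i.e. 32 bytes, into the return buffer it is handed, while the old .data_read hook forwarded whatever pointer the hwrng core supplied, which in some core paths is a small on-stack buffer. Bouncing through a correctly sized local array and copying out at most 8 bytes removes the overrun. A minimal userspace sketch of the same class of bug, with stand-ins (fake_hcall, RETBUF_WORDS) in place of the hypervisor call:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RETBUF_WORDS 4	/* stand-in for PLPAR_HCALL_BUFSIZE */

/* Stand-in for plpar_hcall(): unconditionally writes RETBUF_WORDS words. */
static void fake_hcall(uint64_t *retbuf)
{
	for (int i = 0; i < RETBUF_WORDS; i++)
		retbuf[i] = 0x0101010101010101ULL * (i + 1);
}

/* Fixed pattern: bounce through a local buffer that is always big enough. */
static size_t read_fixed(void *data, size_t max)
{
	uint64_t buffer[RETBUF_WORDS];
	size_t size = max < 8 ? max : 8;

	fake_hcall(buffer);
	memcpy(data, buffer, size);	/* copy only what the caller asked for */
	return size;
}

int main(void)
{
	uint64_t out;
	size_t n = read_fixed(&out, sizeof(out));

	/* Passing &out straight to fake_hcall() (the old pattern) would
	 * overrun the 8-byte slot by 24 bytes. */
	printf("got %zu bytes: %016llx\n", n, (unsigned long long)out);
	return 0;
}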
@@ -48,23 +48,29 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 	u32 *desc;
 	struct split_key_result result;
 	dma_addr_t dma_addr_in, dma_addr_out;
-	int ret = 0;
+	int ret = -ENOMEM;
 	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
 	if (!desc) {
 		dev_err(jrdev, "unable to allocate key input memory\n");
-		return -ENOMEM;
+		return ret;
 	}
-	init_job_desc(desc, 0);
 	dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
 				     DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, dma_addr_in)) {
 		dev_err(jrdev, "unable to map key input memory\n");
-		kfree(desc);
-		return -ENOMEM;
+		goto out_free;
 	}
+	dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+				      DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, dma_addr_out)) {
+		dev_err(jrdev, "unable to map key output memory\n");
+		goto out_unmap_in;
+	}
+	init_job_desc(desc, 0);
 	append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
 	/* Sets MDHA up into an HMAC-INIT */
@@ -81,13 +87,6 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 	 * FIFO_STORE with the explicit split-key content store
 	 * (0x26 output type)
 	 */
-	dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
-				      DMA_FROM_DEVICE);
-	if (dma_mapping_error(jrdev, dma_addr_out)) {
-		dev_err(jrdev, "unable to map key output memory\n");
-		kfree(desc);
-		return -ENOMEM;
-	}
 	append_fifo_store(desc, dma_addr_out, split_key_len,
 			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
@@ -115,10 +114,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 	dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
 			 DMA_FROM_DEVICE);
+out_unmap_in:
 	dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+out_free:
 	kfree(desc);
 	return ret;
 }
 EXPORT_SYMBOL(gen_split_key);
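The reworked error handling above is the usual kernel unwind idiom: acquire resources in order, and on failure jump to a label that releases only what has already been acquired, in reverse order, so every exit path shares the same cleanup code. A self-contained sketch of the pattern with hypothetical acquire()/release() stand-ins (not CAAM or DMA APIs):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for resource acquisition; each may fail. */
static void *acquire(const char *name) { printf("acquire %s\n", name); return malloc(1); }
static void release(const char *name, void *p) { printf("release %s\n", name); free(p); }

static int do_work(void)
{
	void *desc, *map_in, *map_out;
	int ret = -ENOMEM;

	desc = acquire("desc");
	if (!desc)
		return ret;

	map_in = acquire("map_in");
	if (!map_in)
		goto out_free;

	map_out = acquire("map_out");
	if (!map_out)
		goto out_unmap_in;

	/* ... do the real work, set ret from its result ... */
	ret = 0;

	release("map_out", map_out);
out_unmap_in:
	release("map_in", map_in);
out_free:
	release("desc", desc);
	return ret;
}

int main(void)
{
	return do_work() ? EXIT_FAILURE : EXIT_SUCCESS;
}

As in gen_split_key(), the success path falls through the same labels, so the mappings and the descriptor are released exactly once on every exit.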
@@ -198,8 +198,7 @@ struct adf_accel_dev {
 	struct dentry *debugfs_dir;
 	struct list_head list;
 	struct module *owner;
-	uint8_t accel_id;
-	uint8_t numa_node;
 	struct adf_accel_pci accel_pci_dev;
+	uint8_t accel_id;
 } __packed;
 #endif
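All of the QAT hunks below drop the driver-private numa_node field (which was filled by a PCI bus-number heuristic that this series also deletes) and instead ask the device via dev_to_node(). That helper returns the node the platform reported for the struct device, or NUMA_NO_NODE (-1) when nothing is known, and kmalloc_node()/kzalloc_node() treat NUMA_NO_NODE as "no preference", so the allocations stay valid on non-NUMA machines instead of crashing on a bogus node id. A small userspace sketch of the intent, with fake_* stand-ins for the kernel helpers (values are made up):

#include <stdio.h>
#include <stdlib.h>

#define NUMA_NO_NODE (-1)	/* same sentinel value the kernel uses */

/* Stand-in for num_possible_nodes(); pretend this is a two-node box. */
static int fake_num_possible_nodes(void) { return 2; }

/* Stand-in for kzalloc_node(): NUMA_NO_NODE means "any node is fine". */
static void *fake_kzalloc_node(size_t sz, int node)
{
	printf("allocating %zu bytes on %s\n", sz,
	       node == NUMA_NO_NODE ? "any node" : "a specific node");
	return calloc(1, sz);
}

static int probe(int node)
{
	void *state;

	/* Mirrors the dh895xcc probe check further down: on a multi-node
	 * machine a device with unknown affinity is rejected up front. */
	if (fake_num_possible_nodes() > 1 && node < 0) {
		fprintf(stderr, "Invalid NUMA configuration.\n");
		return -1;
	}

	state = fake_kzalloc_node(128, node);
	if (!state)
		return -1;
	free(state);
	return 0;
}

int main(void)
{
	probe(1);		/* firmware reported a node: allocate near it */
	probe(NUMA_NO_NODE);	/* no affinity known: rejected on this box */
	return 0;
}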
@@ -419,9 +419,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
 		WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
 		ring = &bank->rings[i];
 		if (hw_data->tx_rings_mask & (1 << i)) {
-			ring->inflights = kzalloc_node(sizeof(atomic_t),
-						       GFP_KERNEL,
-						       accel_dev->numa_node);
+			ring->inflights =
+				kzalloc_node(sizeof(atomic_t),
+					     GFP_KERNEL,
+					     dev_to_node(&GET_DEV(accel_dev)));
 			if (!ring->inflights)
 				goto err;
 		} else {
@@ -469,13 +470,14 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
 	int i, ret;
 	etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
-				accel_dev->numa_node);
+				dev_to_node(&GET_DEV(accel_dev)));
 	if (!etr_data)
 		return -ENOMEM;
 	num_banks = GET_MAX_BANKS(accel_dev);
 	size = num_banks * sizeof(struct adf_etr_bank_data);
-	etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
+	etr_data->banks = kzalloc_node(size, GFP_KERNEL,
+				       dev_to_node(&GET_DEV(accel_dev)));
 	if (!etr_data->banks) {
 		ret = -ENOMEM;
 		goto err_bank;
......
@@ -596,7 +596,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	if (unlikely(!n))
 		return -EINVAL;
-	bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
+	bufl = kmalloc_node(sz, GFP_ATOMIC,
+			    dev_to_node(&GET_DEV(inst->accel_dev)));
 	if (unlikely(!bufl))
 		return -ENOMEM;
@@ -605,6 +606,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		goto err;
 	for_each_sg(assoc, sg, assoc_n, i) {
+		if (!sg->length)
+			continue;
 		bufl->bufers[bufs].addr = dma_map_single(dev,
 							 sg_virt(sg),
 							 sg->length,
@@ -640,7 +643,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		struct qat_alg_buf *bufers;
 		buflout = kmalloc_node(sz, GFP_ATOMIC,
-				       inst->accel_dev->numa_node);
+				       dev_to_node(&GET_DEV(inst->accel_dev)));
 		if (unlikely(!buflout))
 			goto err;
 		bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
......
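The associated-data fix above skips scatterlist entries whose length is zero before calling dma_map_single(): mapping a zero-length region serves no purpose, can trip DMA-API debug checks or fail outright depending on the platform, and would waste a slot in the firmware buffer list. A self-contained sketch of the same guard over a plain array (struct seg and fake_map are stand-ins, not the QAT structures):

#include <stddef.h>
#include <stdio.h>

/* Stand-in for one scatterlist segment. */
struct seg {
	const void *buf;
	size_t len;
};

/* Stand-in for dma_map_single(): refuses zero-length mappings. */
static int fake_map(const struct seg *s)
{
	if (!s->len)
		return -1;	/* would be a mapping error or warning in the kernel */
	printf("mapped %zu bytes\n", s->len);
	return 0;
}

int main(void)
{
	struct seg assoc[] = { { "hdr", 3 }, { "", 0 }, { "trailer", 7 } };
	size_t bufs = 0;

	for (size_t i = 0; i < sizeof(assoc) / sizeof(assoc[0]); i++) {
		if (!assoc[i].len)
			continue;	/* the fix: skip empty segments */
		if (fake_map(&assoc[i]))
			return 1;
		bufs++;		/* only consumed for non-empty segments */
	}
	printf("%zu buffers in the list\n", bufs);
	return 0;
}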
@@ -109,12 +109,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
 	list_for_each(itr, adf_devmgr_get_head()) {
 		accel_dev = list_entry(itr, struct adf_accel_dev, list);
-		if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
+		if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
+			dev_to_node(&GET_DEV(accel_dev)) < 0)
+				&& adf_dev_started(accel_dev))
 			break;
 		accel_dev = NULL;
 	}
 	if (!accel_dev) {
-		pr_err("QAT: Could not find device on give node\n");
+		pr_err("QAT: Could not find device on node %d\n", node);
 		accel_dev = adf_devmgr_get_first();
 	}
 	if (!accel_dev || !adf_dev_started(accel_dev))
@@ -164,7 +166,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
 	for (i = 0; i < num_inst; i++) {
 		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
-				    accel_dev->numa_node);
+				    dev_to_node(&GET_DEV(accel_dev)));
 		if (!inst)
 			goto err;
......
@@ -108,7 +108,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
 	uint64_t reg_val;
 	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
-			     accel_dev->numa_node);
+			     dev_to_node(&GET_DEV(accel_dev)));
 	if (!admin)
 		return -ENOMEM;
 	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
......
@@ -119,21 +119,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
 	kfree(accel_dev);
 }
-static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
-{
-	unsigned int bus_per_cpu = 0;
-	struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
-	if (!c->phys_proc_id)
-		return 0;
-	bus_per_cpu = 256 / (c->phys_proc_id + 1);
-	if (bus_per_cpu != 0)
-		return pdev->bus->number / bus_per_cpu;
-	return 0;
-}
 static int qat_dev_start(struct adf_accel_dev *accel_dev)
 {
 	int cpus = num_online_cpus();
@@ -235,7 +220,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	void __iomem *pmisc_bar_addr = NULL;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	uint8_t node;
 	int ret;
 	switch (ent->device) {
@@ -246,12 +230,19 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return -ENODEV;
 	}
-	node = adf_get_dev_node_id(pdev);
-	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
+	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+		/* If the accelerator is connected to a node with no memory
+		 * there is no point in using the accelerator since the remote
+		 * memory transaction will be very slow. */
+		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+		return -EINVAL;
+	}
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
 	if (!accel_dev)
 		return -ENOMEM;
-	accel_dev->numa_node = node;
 	INIT_LIST_HEAD(&accel_dev->crypto_list);
 	/* Add accel device to accel table.
@@ -264,7 +255,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	accel_dev->owner = THIS_MODULE;
 	/* Allocate and configure device configuration structure */
-	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node);
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
 	if (!hw_data) {
 		ret = -ENOMEM;
 		goto out_err;
......
@@ -168,7 +168,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
 	uint32_t msix_num_entries = hw_data->num_banks + 1;
 	entries = kzalloc_node(msix_num_entries * sizeof(*entries),
-			       GFP_KERNEL, accel_dev->numa_node);
+			       GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
 	if (!entries)
 		return -ENOMEM;
......