提交 f1ae75f1 编写于 作者: X xuzaibo 提交者: Xie XiuQi

Add Hi1620 ZIP&HPRE drivers with common QM driver, which are adapted to Crypto and SPIMDEV.

Feature or Bugfix:Feature
Signed-off-by: xuzaibo <xuzaibo@huawei.com>
上级 d5cc7d54
......@@ -12,3 +12,36 @@ config CRYPTO_DEV_HISI_SEC
To compile this as a module, choose M here: the module
will be called hisi_sec.
config CRYPTO_DEV_HISILICON
tristate "Support for HISILICON CRYPTO ACCELERATOR"
help
Enable this to use Hisilicon Hardware Accelerators
config CRYPTO_DEV_HISI_SPIMDEV
bool "Enable SPIMDEV interface"
depends on CRYPTO_DEV_HISILICON
select VFIO_SPIMDEV
help
Enable this to enable the SPIMDEV, "shared parent IOMMU Mediated Device"
interface, for all HiSilicon accelerators that support it. The SPIMDEV
enables the WarpDrive user space accelerator driver to access the
hardware function directly.
config CRYPTO_DEV_HISI_QM
tristate
depends on ARM64 && PCI
config CRYPTO_DEV_HISI_ZIP
tristate "Support for HISI ZIP Driver"
depends on ARM64 && CRYPTO_DEV_HISILICON
select CRYPTO_DEV_HISI_QM
help
Support for HiSilicon HIP08 ZIP Driver
config CRYPTO_DEV_HISI_HPRE
tristate "Support for HISI HPRE Driver"
depends on ARM64 && CRYPTO_DEV_HISILICON
select CRYPTO_DEV_HISI_QM
help
Support for HiSilicon HIP08 HPRE Driver
\ No newline at end of file
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += qm.o
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hpre/
obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hisi_hpre.o
hisi_hpre-objs = hpre_main.o hpre_crypto.o
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __HISI_HPRE_H
#define __HISI_HPRE_H
#include <linux/list.h>
#include "../qm.h"
#define HPRE_SQE_SIZE 64
#define HPRE_SQ_SIZE (HPRE_SQE_SIZE * QM_Q_DEPTH)
#define QM_CQ_SIZE (QM_CQE_SIZE * QM_Q_DEPTH)
#define HPRE_PF_DEF_Q_NUM 64
#define HPRE_PF_DEF_Q_BASE 0
/*
 * Per-PCI-device state for one HPRE accelerator.  Embeds the common QM
 * (queue management) context and links the device into the global
 * hisi_hpre_list used for device lookup.
 */
struct hisi_hpre {
struct qm_info qm;
struct list_head list;
#ifdef CONFIG_CRYPTO_DEV_HISI_SPIMDEV
/* WarpDrive/SPIMDEV handle when user-space access is enabled */
struct vfio_spimdev *spimdev;
#endif
};
/*
 * Algorithm selector written into hisi_hpre_sqe.alg (5 bits).
 * NOTE(review): value meanings follow the HPRE hardware BD spec, which is
 * not visible here — confirm against the hardware documentation.
 */
enum hisi_hpre_alg_type {
HPRE_ALG_NC_NCRT = 0x0,
HPRE_ALG_NC_CRT = 0x1,
HPRE_ALG_KG_STD = 0x2,
HPRE_ALG_KG_CRT = 0x3,
HPRE_ALG_DH_G2 = 0x4,
HPRE_ALG_DH = 0x5,
HPRE_ALG_PRIME = 0x6,
HPRE_ALG_MOD = 0x7,
HPRE_ALG_MOD_INV = 0x8,
HPRE_ALG_MUL = 0x9,
HPRE_ALG_COPRIME = 0xA
};
/*
 * Hardware submission-queue element for HPRE, 64 bytes (HPRE_SQE_SIZE).
 * Bitfield widths mirror the hardware BD layout; low_*/hi_* pairs hold
 * the 64-bit DMA addresses of key/input/output buffers split into 32-bit
 * halves.
 */
struct hisi_hpre_sqe {
__u32 alg : 5;
/* error type */
__u32 etype :11;
__u32 resv0 : 14;
__u32 done : 2;
__u32 task_len1 : 8;
__u32 task_len2 : 8;
__u32 mrttest_num : 8;
__u32 resv1 : 8;
__u32 low_key;
__u32 hi_key;
__u32 low_in;
__u32 hi_in;
__u32 low_out;
__u32 hi_out;
/* software tag echoed back on completion */
__u32 tag :16;
__u32 resv2 :16;
__u32 rsvd1[7];
};
extern struct list_head hisi_hpre_list;
extern int hpre_algs_register(void);
extern void hpre_algs_unregister(void);
#endif
此差异已折叠。
// SPDX-License-Identifier: GPL-2.0+
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vfio_spimdev.h>
#include "hpre.h"
#define HPRE_VF_NUM 63
#define HPRE_QUEUE_NUM_V1 4096
#define HPRE_QUEUE_NUM_V2 1024
#define HPRE_COMM_CNT_CLR_CE 0x0
#define HPRE_FSM_MAX_CNT 0x301008
#define HPRE_VFG_AXQOS 0x30100c
#define HPRE_VFG_AXCACHE 0x301010
#define HPRE_RDCHN_INI_CFG 0x301014
#define HPRE_BD_ENDIAN 0x301020
#define HPRE_ECC_BYPASS 0x301024
#define HPRE_POISON_BYPASS 0x30102c
#define HPRE_ARUSR_CFG 0x301030
#define HPRE_AWUSR_CFG 0x301034
#define HPRE_INT_MASK 0x301400
#define HPRE_RAS_ECC_1BIT_TH 0x30140c
#define HPRE_TYPES_ENB 0x301038
#define HPRE_PORT_ARCA_CHE_0 0x301040
#define HPRE_PORT_ARCA_CHE_1 0x301044
#define HPRE_PORT_AWCA_CHE_0 0x301060
#define HPRE_PORT_AWCA_CHE_1 0x301064
#define HPRE_BD_RUSER_32_63 0x301110
#define HPRE_SGL_RUSER_32_63 0x30111c
#define HPRE_DATA_RUSER_32_63 0x301128
#define HPRE_DATA_WUSER_32_63 0x301134
#define HPRE_BD_WUSER_32_63 0x301140
#define HPRE_RDCHN_INI_ST 0x301a00
#define HPRE_CORE_ENB 0x302004
#define HPRE_CORE_INI_CFG 0x302020
#define HPRE_CORE_INI_STATUS 0x302080
LIST_HEAD(hisi_hpre_list);
DEFINE_MUTEX(hisi_hpre_list_lock);
/* PCI IDs: 0xa258 is the HPRE PF, 0xa259 the VF — TODO confirm mapping */
static const struct pci_device_id hisi_hpre_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa258) },
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa259) },
{ 0, }
};
/* Append a probed HPRE device to the global device list (thread-safe). */
static inline void hisi_hpre_add_to_list(struct hisi_hpre *hisi_hpre)
{
	mutex_lock(&hisi_hpre_list_lock);
	list_add_tail(&hisi_hpre->list, &hisi_hpre_list);
	mutex_unlock(&hisi_hpre_list_lock);
}
/* Unlink a departing HPRE device from the global device list. */
static inline void hisi_hpre_remove_from_list(struct hisi_hpre *hisi_hpre)
{
	mutex_lock(&hisi_hpre_list_lock);
	list_del(&hisi_hpre->list);
	mutex_unlock(&hisi_hpre_list_lock);
}
/*
 * One-time PF-side initialization of the HPRE user domain, AXI cache
 * attributes and core clusters.  The register write ordering follows the
 * hardware bring-up sequence; do not reorder.
 * Returns 0 on success, -ETIMEDOUT if a readiness poll expires.
 */
static int hisi_hpre_set_user_domain_and_cache(struct hisi_hpre *hisi_hpre)
{
int ret;
u32 val;
/* enable algorithm types; program QoS/cache/endianness defaults */
writel(0x1, hisi_hpre->qm.io_base + HPRE_TYPES_ENB);
writel(0x0, hisi_hpre->qm.io_base + HPRE_VFG_AXQOS);
writel(0xff, hisi_hpre->qm.io_base + HPRE_VFG_AXCACHE);
writel(0x0, hisi_hpre->qm.io_base + HPRE_BD_ENDIAN);
/* unmask interrupts and clear RAS/poison/counter controls */
writel(0x0, hisi_hpre->qm.io_base + HPRE_INT_MASK);
writel(0x0, hisi_hpre->qm.io_base + HPRE_RAS_ECC_1BIT_TH);
writel(0x0, hisi_hpre->qm.io_base + HPRE_POISON_BYPASS);
writel(0x0, hisi_hpre->qm.io_base + HPRE_COMM_CNT_CLR_CE);
writel(0x0, hisi_hpre->qm.io_base + HPRE_ECC_BYPASS);
/* AXI user config differs when the SMMUv3 translates device accesses */
#ifndef CONFIG_ARM_SMMU_V3
writel(0x1, hisi_hpre->qm.io_base + HPRE_ARUSR_CFG);
writel(0x1, hisi_hpre->qm.io_base + HPRE_AWUSR_CFG);
#else
writel(0x203, hisi_hpre->qm.io_base + HPRE_ARUSR_CFG);
writel(0x203, hisi_hpre->qm.io_base + HPRE_AWUSR_CFG);
#endif
/* kick read-channel init and wait for the ready bit (bit 0) */
writel(0x1, hisi_hpre->qm.io_base + HPRE_RDCHN_INI_CFG);
ret = readl_relaxed_poll_timeout(hisi_hpre->qm.io_base +
HPRE_RDCHN_INI_ST, val, val & BIT(0), 10, 1000);
if (ret) {
pr_err("\nHPRE:INI ST TIMEOUT");
return -ETIMEDOUT;
}
/* First cluster initiating */
writel(0xf, hisi_hpre->qm.io_base + HPRE_CORE_ENB);
writel(0x1, hisi_hpre->qm.io_base + HPRE_CORE_INI_CFG);
ret = readl_relaxed_poll_timeout(hisi_hpre->qm.io_base +
HPRE_CORE_INI_STATUS,
val, ((val & 0xf) == 0xf), 10, 1000);
if (ret) {
pr_err("\nHPRE:CLUSTER 1 INI ST STATUS timeout");
return -ETIMEDOUT;
}
/* Second cluster initiating, reg's address is 0x1000 more*/
/* writel(0xf, hpre->io_base + 0x1000 + HPRE_CORE_ENB);*/
/* NOTE(review): cluster 2 cores are written 0x0 (disabled) unlike
 * cluster 1's 0xf, yet its INI status is still polled for 0xf —
 * confirm this is intentional for this silicon revision.
 */
writel(0x0, hisi_hpre->qm.io_base + 0x1000 + HPRE_CORE_ENB);
writel(0x1, hisi_hpre->qm.io_base + 0x1000 + HPRE_CORE_INI_CFG);
ret = readl_relaxed_poll_timeout(hisi_hpre->qm.io_base + 0x1000 +
HPRE_CORE_INI_STATUS,
val, ((val & 0xf) == 0xf), 10, 1000);
if (ret) {
pr_err("\nHPRE:CLUSTER 2 INI ST STATUS timeout");
return -ETIMEDOUT;
}
return ret;
}
/*
 * PCI probe for the HPRE accelerator.
 *
 * Fixes vs. original:
 *  - pci_set_drvdata() was never called although hisi_hpre_remove() reads
 *    it back with pci_get_drvdata() — remove() would dereference NULL.
 *  - a failure of hisi_hpre_set_user_domain_and_cache() returned directly,
 *    leaking the initialized QM and leaving the device on the global list.
 *  - the device memory is devm_kzalloc()-managed; the explicit kfree() on
 *    the error path caused a double free when devres released it again.
 *  - the device is now published on the global list only after it is
 *    fully initialized, so lookups cannot see a half-constructed device.
 */
static int hisi_hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_hpre *hisi_hpre;
	struct qm_info *qm;
	int ret;
	u8 rev_id = 0;
#ifdef CONFIG_ARM_SMMU_V3
	u32 val;
#endif

	hisi_hpre = devm_kzalloc(&pdev->dev, sizeof(*hisi_hpre), GFP_KERNEL);
	if (!hisi_hpre)
		return -ENOMEM;

	pci_set_drvdata(pdev, hisi_hpre);

	qm = &hisi_hpre->qm;
	qm->pdev = pdev;
	/* hardware revision selects the QM programming model */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == 0x20)
		qm->ver = QM_HW_V1;
	else if (rev_id == 0x21)
		qm->ver = QM_HW_V2;
	qm->sqe_size = HPRE_SQE_SIZE;

	ret = hisi_qm_init(qm, HPRE);
	if (ret)
		return ret;

#define HPRE_ADDR(offset) QM_ADDR(qm, offset)
	if (pdev->is_physfn) {
		/* user domain */
		writel(0x40000070, HPRE_ADDR(QM_ARUSER_M_CFG_1));
		writel(0x007ffffc, HPRE_ADDR(QM_ARUSER_M_CFG_ENABLE));
		writel(0x40000070, HPRE_ADDR(QM_AWUSER_M_CFG_1));
		writel(0x007ffffc, HPRE_ADDR(QM_AWUSER_M_CFG_ENABLE));
		writel(0x00000001, HPRE_ADDR(QM_WUSER_M_CFG_ENABLE));
		writel(0x1833, HPRE_ADDR(QM_CACHE_CTL));
		writel(0x00400001, HPRE_ADDR(QM_PEH_AXUSER_CFG));
#ifdef CONFIG_ARM_SMMU_V3
		/* SMMUv3: widen user-field enables, then select SSID mode */
		writel(0x40000070, HPRE_ADDR(QM_ARUSER_M_CFG_1));
		writel(0xfffffffe, HPRE_ADDR(QM_ARUSER_M_CFG_ENABLE));
		writel(0x40000070, HPRE_ADDR(QM_AWUSER_M_CFG_1));
		writel(0xfffffffe, HPRE_ADDR(QM_AWUSER_M_CFG_ENABLE));
		val = readl_relaxed(HPRE_ADDR(QM_ARUSER_M_CFG_1));
		val &= ~GENMASK(14, 12);
		val |= (1 << 12);
		writel(val, HPRE_ADDR(QM_ARUSER_M_CFG_1));
		val = readl_relaxed(HPRE_ADDR(QM_AWUSER_M_CFG_1));
		val &= ~GENMASK(14, 12);
		val |= (1 << 12);
		writel(val, HPRE_ADDR(QM_AWUSER_M_CFG_1));
		val = readl_relaxed(HPRE_ADDR(QM_ARUSER_M_CFG_ENABLE));
		val &= ~0x1;
		writel(val, HPRE_ADDR(QM_ARUSER_M_CFG_ENABLE));
		val = readl_relaxed(HPRE_ADDR(QM_AWUSER_M_CFG_ENABLE));
		val &= ~0x1;
		writel(val, HPRE_ADDR(QM_AWUSER_M_CFG_ENABLE));
#endif
		/* cache */
		writel_relaxed(0x0303, /* 0xffff IT */
			       HPRE_ADDR(QM_AXI_M_CFG));
		writel_relaxed(0xf,
			       HPRE_ADDR(QM_AXI_M_CFG_ENABLE));
		writel_relaxed(0x7f,
			       HPRE_ADDR(QM_PEH_AXUSER_CFG_ENABLE));
#ifdef CONFIG_ARM_SMMU_V3
		writel_relaxed(0xffff,
			       HPRE_ADDR(QM_AXI_M_CFG));
		writel_relaxed(0xffffffff,
			       HPRE_ADDR(QM_AXI_M_CFG_ENABLE));
		writel_relaxed(0xffffffff,
			       HPRE_ADDR(QM_PEH_AXUSER_CFG_ENABLE));
#endif
		ret = hisi_qm_mem_start(qm);
		if (ret)
			goto err_with_qm_init;

		ret = hisi_hpre_set_user_domain_and_cache(hisi_hpre);
		if (ret)
			goto err_with_qm_init;

		qm->qp_base = HPRE_PF_DEF_Q_BASE;
		qm->qp_num = HPRE_PF_DEF_Q_NUM;
		qm->free_qp = qm->qp_num;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_with_qm_init;

	/* publish only once the device is fully usable */
	hisi_hpre_add_to_list(hisi_hpre);

	/* todo: exception irq handler register, ES did not support */
	return 0;

err_with_qm_init:
	hisi_qm_uninit(qm);
	return ret;
}
/*
 * PCI remove: stop the QM, tear down its resources and drop the device
 * from the global list.
 *
 * Fix: hisi_hpre was allocated with devm_kzalloc(), so the explicit
 * kfree() here was a double free — devres frees it when the device goes
 * away.
 */
static void hisi_hpre_remove(struct pci_dev *pdev)
{
	struct hisi_hpre *hisi_hpre = pci_get_drvdata(pdev);
	struct qm_info *qm = &hisi_hpre->qm;

	hisi_qm_stop(qm);
	hisi_qm_uninit(qm);
	hisi_hpre_remove_from_list(hisi_hpre);
}
/*
 * SR-IOV configuration stub.  Always reports success; VF queue
 * partitioning is not implemented yet (see todo below).
 */
static int hisi_hpre_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
/* todo: set queue number for VFs */
return 0;
}
/* PCI driver glue for the HPRE accelerator. */
static struct pci_driver hisi_hpre_pci_driver = {
.name = "hisi_hpre",
.id_table = hisi_hpre_dev_ids,
.probe = hisi_hpre_probe,
.remove = hisi_hpre_remove,
.sriov_configure = hisi_hpre_pci_sriov_configure
};
/*
 * Module init: register the PCI driver first, then expose the HPRE
 * algorithms to the crypto API.  If the crypto registration fails the
 * PCI driver is rolled back so the module loads all-or-nothing.
 */
static int __init hisi_hpre_init(void)
{
	int ret;

	ret = pci_register_driver(&hisi_hpre_pci_driver);
	if (ret < 0) {
		pr_err("hpre: can't register hisi hpre driver.\n");
		return ret;
	}

	ret = hpre_algs_register();
	if (ret >= 0)
		return 0;

	pr_err("hpre: can't register hisi hpre to crypto.\n");
	pci_unregister_driver(&hisi_hpre_pci_driver);
	return ret;
}
/* Module exit: tear down in reverse order of registration. */
static void __exit hisi_hpre_exit(void)
{
	hpre_algs_unregister();
	pci_unregister_driver(&hisi_hpre_pci_driver);
}
module_init(hisi_hpre_init);
module_exit(hisi_hpre_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");
MODULE_DEVICE_TABLE(pci, hisi_hpre_dev_ids);
此差异已折叠。
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef HISI_ACC_QM_H
#define HISI_ACC_QM_H
#include <linux/dmapool.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#ifdef CONFIG_CRYPTO_DEV_HISI_SPIMDEV
#include <linux/vfio_spimdev.h>
#endif
#define QM_HW_V1 1
#define QM_HW_V2 2
#define QM_CQE_SIZE 16
/* default queue depth for sq/cq/eq */
#define QM_Q_DEPTH 1024
/* qm user domain */
#define QM_ARUSER_M_CFG_1 0x100088
#define QM_ARUSER_M_CFG_ENABLE 0x100090
#define QM_AWUSER_M_CFG_1 0x100098
#define QM_AWUSER_M_CFG_ENABLE 0x1000a0
#define QM_WUSER_M_CFG_ENABLE 0x1000a8
/* qm cache */
#define QM_CACHE_CTL 0x100050
#define QM_AXI_M_CFG 0x1000ac
#define QM_AXI_M_CFG_ENABLE 0x1000b0
#define QM_PEH_AXUSER_CFG 0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0
#define QP_SQE_ADDR(qp) ((qp)->scqe.addr)
#define _GET_DMA_PAGES _IOW('d', 3, unsigned long long)
#define _PUT_DMA_PAGES _IOW('d', 4, unsigned long long)
enum qm_type {
ZIP = 1,
HPRE,
SEC,
};
struct qm_dma_buffer {
int size;
void *addr;
dma_addr_t dma;
};
/*
 * Shared queue-management (QM) context embedded in every accelerator
 * device (ZIP/HPRE/SEC).  Holds the BAR mapping, queue-pair bookkeeping
 * and the DMA buffers for the SQC/CQC/EQC/EQE hardware tables.
 */
struct qm_info {
int ver; /* QM_HW_V1 or QM_HW_V2, from the PCI revision id */
enum qm_type type;
const char *dev_name;
struct pci_dev *pdev;
resource_size_t phys_base;
resource_size_t size;
void __iomem *io_base;
u32 sqe_size; /* per-engine SQE size in bytes */
u32 qp_base; /* first queue-pair index owned by this function */
u32 qp_num;
u32 free_qp; /* remaining unallocated queue pairs */
struct qm_dma_buffer sqc, cqc, eqc, eqe;
u32 eq_head;
rwlock_t qps_lock; /* protects qp_bitmap and qp_array */
unsigned long *qp_bitmap;
struct hisi_qp **qp_array;
struct mutex mailbox_lock; /* serializes mailbox commands to hardware */
struct hisi_acc_qm_hw_ops *ops;
#ifdef CONFIG_CRYPTO_DEV_HISI_SPIMDEV
struct vfio_spimdev spimdev;
const struct attribute_group **mdev_dev_groups;
#endif
};
#define QM_ADDR(qm, off) ((qm)->io_base + off)
/* Software-side ring cursors and flags for one queue pair. */
struct hisi_acc_qp_status {
u16 sq_tail;
u16 sq_head;
u16 cq_head;
bool cqc_phase; /* expected CQE phase bit, flips on ring wrap */
int is_sq_full;
};
struct hisi_qp;
struct hisi_qp_ops {
int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};
struct qp_phy_pages {
u64 size;
int order;
int node_id;
u64 phy_addr;
};
/*
 * One hardware queue pair (submission queue + completion queue) handed
 * out by the QM to an accelerator user.
 */
struct hisi_qp {
/* sq number in this function */
u32 queue_id;
u8 alg_type;
u8 req_type;
struct qm_dma_buffer sqc, cqc;
struct qm_dma_buffer scqe; /* combined SQE/CQE ring storage */
struct hisi_acc_qp_status qp_status;
struct qm_info *qm;
struct qp_phy_pages udma_buf;/* For user space */
#ifdef CONFIG_CRYPTO_DEV_HISI_SPIMDEV
struct vfio_spimdev_queue *spimdev_q;
#endif
/* for crypto sync API */
struct completion completion;
struct hisi_qp_ops *hw_ops;
void *qp_ctx; /* owner-private context, passed back via callbacks */
void (*event_cb)(struct hisi_qp *qp);
void (*req_cb)(struct hisi_qp *qp, void *data);
};
extern int hisi_qm_init(struct qm_info *qm, enum qm_type type);
extern void hisi_qm_uninit(struct qm_info *qm);
extern int hisi_qm_start(struct qm_info *qm);
extern void hisi_qm_stop(struct qm_info *qm);
extern int hisi_qm_mem_start(struct qm_info *qm);
extern struct hisi_qp *hisi_qm_create_qp(struct qm_info *qm, u8 alg_type);
extern int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
extern void hisi_qm_release_qp(struct hisi_qp *qp);
extern int hisi_qp_send(struct hisi_qp *qp, void *msg);
#endif
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o
hisi_zip-objs = zip_main.o zip_crypto.o
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef HISI_ZIP_H
#define HISI_ZIP_H
#include <linux/list.h>
#include "../qm.h"
#define HZIP_SQE_SIZE 128
#define HZIP_SQ_SIZE (HZIP_SQE_SIZE * QM_Q_DEPTH)
#define QM_CQ_SIZE (QM_CQE_SIZE * QM_Q_DEPTH)
#define HZIP_PF_DEF_Q_NUM 64
#define HZIP_PF_DEF_Q_BASE 0
/*
 * Per-PCI-device state for one ZIP accelerator: the common QM context
 * plus the node for the global hisi_zip_list used by find_zip_device().
 */
struct hisi_zip {
struct qm_info qm;
struct list_head list;
#ifdef CONFIG_CRYPTO_DEV_HISI_SPIMDEV
/* WarpDrive/SPIMDEV handle when user-space access is enabled */
struct vfio_spimdev *spimdev;
#endif
};
/*
 * Hardware submission-queue element for ZIP, 128 bytes (HZIP_SQE_SIZE).
 * *_l/*_h pairs split 64-bit DMA addresses into 32-bit halves.
 * NOTE(review): the dw3/dw7-dw12 control words are opaque here; their
 * bit layouts come from the hardware BD spec (dw3 low byte is used as a
 * status field on writeback, dw9 carries the request type).
 */
struct hisi_zip_sqe {
__u32 consumed;
__u32 produced;
__u32 comp_data_length;
__u32 dw3;
__u32 input_data_length;
__u32 lba_l;
__u32 lba_h;
__u32 dw7;
__u32 dw8;
__u32 dw9;
__u32 dw10;
__u32 priv_info;
__u32 dw12;
__u32 tag;
__u32 dest_avail_out;
__u32 rsvd0;
__u32 comp_head_addr_l;
__u32 comp_head_addr_h;
__u32 source_addr_l;
__u32 source_addr_h;
__u32 dest_addr_l;
__u32 dest_addr_h;
__u32 stream_ctx_addr_l;
__u32 stream_ctx_addr_h;
__u32 cipher_key1_addr_l;
__u32 cipher_key1_addr_h;
__u32 cipher_key2_addr_l;
__u32 cipher_key2_addr_h;
__u32 rsvd1[4];
};
extern struct list_head hisi_zip_list;
#endif
// SPDX-License-Identifier: GPL-2.0+
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include "../qm.h"
#include "zip.h"
#define INPUT_BUFFER_SIZE (64 * 1024)
#define OUTPUT_BUFFER_SIZE (64 * 1024)
#define COMP_NAME_TO_TYPE(alg_name) \
(!strcmp((alg_name), "zlib-deflate") ? 0x02 : \
!strcmp((alg_name), "gzip") ? 0x03 : 0) \
struct hisi_zip_buffer {
u8 *input;
dma_addr_t input_dma;
u8 *output;
dma_addr_t output_dma;
};
struct hisi_zip_qp_ctx {
struct hisi_zip_buffer buffer;
struct hisi_qp *qp;
struct hisi_zip_sqe zip_sqe;
};
struct hisi_zip_ctx {
#define QPC_COMP 0
#define QPC_DECOMP 1
struct hisi_zip_qp_ctx qp_ctx[2];
};
static struct hisi_zip *find_zip_device(int node)
{
struct hisi_zip *hisi_zip, *ret = NULL;
struct device *dev;
int min_distance = 100;
int dev_node = 0;
list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
dev = &hisi_zip->qm.pdev->dev;
#ifdef CONFIG_NUMA
dev_node = dev->numa_node;
#endif
if (node_distance(dev_node, node) < min_distance) {
ret = hisi_zip;
min_distance = node_distance(dev_node, node);
}
}
return ret;
}
/* QM completion callback: wake the task blocked in the sync send path. */
static void hisi_zip_qp_event_notifier(struct hisi_qp *qp)
{
complete(&qp->completion);
}
/*
 * Populate a V1 ZIP SQE for a job of @len input bytes, pointing the
 * hardware at the context's pre-allocated DMA input/output buffers.
 * Always returns 0.
 */
static int hisi_zip_fill_sqe_v1(void *sqe, void *q_parm, u32 len)
{
	struct hisi_zip_qp_ctx *ctx = q_parm;
	struct hisi_zip_buffer *buf = &ctx->buffer;
	struct hisi_zip_sqe *e = sqe;

	memset(e, 0, sizeof(*e));

	e->input_data_length = len;
	e->dw9 = ctx->qp->req_type;
	e->dest_avail_out = OUTPUT_BUFFER_SIZE;
	e->source_addr_l = lower_32_bits(buf->input_dma);
	e->source_addr_h = upper_32_bits(buf->input_dma);
	e->dest_addr_l = lower_32_bits(buf->output_dma);
	e->dest_addr_h = upper_32_bits(buf->output_dma);

	return 0;
}
/* let's allocate one buffer now, may have problem in async case */
/*
 * Allocate the coherent DMA input/output buffers for one qp context.
 * Returns 0 on success or -ENOMEM, with no partial allocation left
 * behind.  (One fixed buffer pair per qp — may be a problem for async.)
 */
static int hisi_zip_alloc_qp_buffer(struct hisi_zip_qp_ctx *hisi_zip_qp_ctx)
{
	struct hisi_zip_buffer *buf = &hisi_zip_qp_ctx->buffer;
	struct device *dev = &hisi_zip_qp_ctx->qp->qm->pdev->dev;

	buf->input = dma_alloc_coherent(dev, INPUT_BUFFER_SIZE,
					&buf->input_dma, GFP_KERNEL);
	if (!buf->input)
		return -ENOMEM;

	buf->output = dma_alloc_coherent(dev, OUTPUT_BUFFER_SIZE,
					 &buf->output_dma, GFP_KERNEL);
	if (buf->output)
		return 0;

	/* roll back the input buffer so the caller sees a clean failure */
	dma_free_coherent(dev, INPUT_BUFFER_SIZE, buf->input,
			  buf->input_dma);
	return -ENOMEM;
}
/* Release both coherent DMA buffers owned by one qp context. */
static void hisi_zip_free_qp_buffer(struct hisi_zip_qp_ctx *hisi_zip_qp_ctx)
{
	struct hisi_zip_buffer *buf = &hisi_zip_qp_ctx->buffer;
	struct device *dev = &hisi_zip_qp_ctx->qp->qm->pdev->dev;

	dma_free_coherent(dev, INPUT_BUFFER_SIZE, buf->input,
			  buf->input_dma);
	dma_free_coherent(dev, OUTPUT_BUFFER_SIZE, buf->output,
			  buf->output_dma);
}
/*
 * Create and start one queue pair for @ctx: obtain a qp from the QM,
 * wire up the completion callback, allocate its DMA buffers and start
 * it.  On any failure everything acquired so far is released.
 */
static int hisi_zip_create_qp(struct qm_info *qm, struct hisi_zip_qp_ctx *ctx,
			      int alg_type, int req_type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hisi_qm_create_qp(qm, alg_type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp->event_cb = hisi_zip_qp_event_notifier;
	qp->req_type = req_type;
	qp->qp_ctx = ctx;
	ctx->qp = qp;

	ret = hisi_zip_alloc_qp_buffer(ctx);
	if (ret)
		goto out_release_qp;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto out_free_buffer;

	return 0;

out_free_buffer:
	hisi_zip_free_qp_buffer(ctx);
out_release_qp:
	hisi_qm_release_qp(qp);
	return ret;
}
/* Counterpart of hisi_zip_create_qp(): release the qp, then its buffers. */
static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
{
	hisi_qm_release_qp(ctx->qp);
	hisi_zip_free_qp_buffer(ctx);
}
/*
 * crypto cra_init: pick the NUMA-closest ZIP device and create the two
 * queue pairs a tfm needs (index 0 = compress, 1 = decompress — the qp
 * index doubles as the hardware alg_type).  Rolls back any qp created
 * before a failure.
 */
static int hisi_zip_alloc_comp_ctx(struct crypto_tfm *tfm)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(tfm);
	u8 req_type = COMP_NAME_TO_TYPE(crypto_tfm_alg_name(tfm));
	struct hisi_zip *hisi_zip;
	int ret, i;

	/* find the proper zip device */
	hisi_zip = find_zip_device(cpu_to_node(smp_processor_id()));
	if (!hisi_zip) {
		pr_err("Can not find proper ZIP device!\n");
		return -ENODEV;
	}

	for (i = 0; i < 2; i++) {
		ret = hisi_zip_create_qp(&hisi_zip->qm, &ctx->qp_ctx[i], i,
					 req_type);
		if (ret)
			goto err_release;
	}

	return 0;

err_release:
	while (--i >= 0)
		hisi_zip_release_qp(&ctx->qp_ctx[i]);
	return ret;
}
/* crypto cra_exit: release both queue pairs, decompress side first. */
static void hisi_zip_free_comp_ctx(struct crypto_tfm *tfm)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(tfm);
	int i = 2;

	while (i--)
		hisi_zip_release_qp(&ctx->qp_ctx[i]);
}
/*
 * Stage @slen bytes of source data into the qp's DMA input buffer.
 * Returns -EINVAL when the input exceeds the fixed buffer size.
 */
static int hisi_zip_copy_data_to_buffer(struct hisi_zip_qp_ctx *qp_ctx,
					const u8 *src, unsigned int slen)
{
	struct hisi_zip_buffer *buf = &qp_ctx->buffer;

	if (slen > INPUT_BUFFER_SIZE)
		return -EINVAL;

	memcpy(buf->input, src, slen);
	return 0;
}
/* Return the SQE at the current sq head, where hardware wrote results back. */
static struct hisi_zip_sqe *hisi_zip_get_writeback_sqe(struct hisi_qp *qp)
{
	struct hisi_zip_sqe *ring = QP_SQE_ADDR(qp);

	return ring + qp->qp_status.sq_head;
}
/*
 * Copy the job result out of the qp's DMA output buffer into @dst and
 * advance the software sq head past the consumed SQE.
 *
 * Fix: on hardware failure the original returned the raw positive status
 * byte from dw3; callers (crypto coa_compress/coa_decompress paths)
 * propagate this value as an errno, so it must be a negative errno.
 * Return -EIO instead and keep the status in the log message.
 */
static int hisi_zip_copy_data_from_buffer(struct hisi_zip_qp_ctx *qp_ctx,
					  u8 *dst, unsigned int *dlen)
{
	struct hisi_zip_buffer *buffer = &qp_ctx->buffer;
	struct hisi_qp *qp = qp_ctx->qp;
	struct hisi_zip_sqe *zip_sqe = hisi_zip_get_writeback_sqe(qp);
	u32 status = zip_sqe->dw3 & 0xff;

	if (status != 0) {
		pr_err("hisi zip: %s fail! status: 0x%x\n",
		       (qp->alg_type == 0) ? "compression" : "decompression",
		       status);
		return -EIO;
	}

	if (zip_sqe->produced > OUTPUT_BUFFER_SIZE)
		return -ENOMEM;

	memcpy(dst, buffer->output, zip_sqe->produced);
	*dlen = zip_sqe->produced;

	/* advance the sq head, wrapping at the ring depth */
	qp->qp_status.sq_head = (qp->qp_status.sq_head + 1) % QM_Q_DEPTH;

	return 0;
}
/*
 * Synchronous compress entry point: stage input, build the SQE, submit
 * it on the compress qp and copy the result back to @dst/@dlen.
 */
static int hisi_zip_compress(struct crypto_tfm *tfm, const u8 *src,
			     unsigned int slen, u8 *dst, unsigned int *dlen)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(tfm);
	struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_COMP];
	int ret;

	ret = hisi_zip_copy_data_to_buffer(qp_ctx, src, slen);
	if (ret < 0)
		return ret;

	hisi_zip_fill_sqe_v1(&qp_ctx->zip_sqe, qp_ctx, slen);

	/* send command to start the compress job */
	hisi_qp_send(qp_ctx->qp, &qp_ctx->zip_sqe);

	return hisi_zip_copy_data_from_buffer(qp_ctx, dst, dlen);
}
/*
 * Synchronous decompress entry point: mirrors hisi_zip_compress() but
 * submits on the decompress qp.
 */
static int hisi_zip_decompress(struct crypto_tfm *tfm, const u8 *src,
			       unsigned int slen, u8 *dst, unsigned int *dlen)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(tfm);
	struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_DECOMP];
	int ret;

	ret = hisi_zip_copy_data_to_buffer(qp_ctx, src, slen);
	if (ret < 0)
		return ret;

	hisi_zip_fill_sqe_v1(&qp_ctx->zip_sqe, qp_ctx, slen);

	/* send command to start the decompress job */
	hisi_qp_send(qp_ctx->qp, &qp_ctx->zip_sqe);

	return hisi_zip_copy_data_from_buffer(qp_ctx, dst, dlen);
}
/* Legacy compression alg registration for zlib-deflate framing. */
static struct crypto_alg hisi_zip_zlib = {
.cra_name = "zlib-deflate",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct hisi_zip_ctx),
.cra_priority = 300,
.cra_module = THIS_MODULE,
.cra_init = hisi_zip_alloc_comp_ctx,
.cra_exit = hisi_zip_free_comp_ctx,
.cra_u = {
.compress = {
.coa_compress = hisi_zip_compress,
.coa_decompress = hisi_zip_decompress
}
}
};
/* Legacy compression alg registration for gzip framing. */
static struct crypto_alg hisi_zip_gzip = {
.cra_name = "gzip",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct hisi_zip_ctx),
.cra_priority = 300,
.cra_module = THIS_MODULE,
.cra_init = hisi_zip_alloc_comp_ctx,
.cra_exit = hisi_zip_free_comp_ctx,
.cra_u = {
.compress = {
.coa_compress = hisi_zip_compress,
.coa_decompress = hisi_zip_decompress
}
}
};
/*
 * Register both compression algorithms with the crypto API.
 * All-or-nothing: if gzip fails, the already-registered zlib alg is
 * unregistered before returning the error.
 */
int hisi_zip_register_to_crypto(void)
{
	int ret;

	ret = crypto_register_alg(&hisi_zip_zlib);
	if (ret < 0) {
		pr_err("Zlib algorithm registration failed\n");
		return ret;
	}

	ret = crypto_register_alg(&hisi_zip_gzip);
	if (ret >= 0)
		return 0;

	pr_err("Gzip algorithm registration failed\n");
	crypto_unregister_alg(&hisi_zip_zlib);
	return ret;
}
/* Unregister both algorithms; the two are independent of each other. */
void hisi_zip_unregister_from_crypto(void)
{
	crypto_unregister_alg(&hisi_zip_zlib);
	crypto_unregister_alg(&hisi_zip_gzip);
}
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef HISI_ZIP_CRYPTO_H
#define HISI_ZIP_CRYPTO_H
int hisi_zip_register_to_crypto(void);
void hisi_zip_unregister_from_crypto(void);
#endif
// SPDX-License-Identifier: GPL-2.0+
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "zip.h"
#include "zip_crypto.h"
#define HZIP_VF_NUM 63
#define HZIP_QUEUE_NUM_V1 4096
#define HZIP_QUEUE_NUM_V2 1024
#define HZIP_FSM_MAX_CNT 0x301008
#define HZIP_PORT_ARCA_CHE_0 0x301040
#define HZIP_PORT_ARCA_CHE_1 0x301044
#define HZIP_PORT_AWCA_CHE_0 0x301060
#define HZIP_PORT_AWCA_CHE_1 0x301064
#define HZIP_BD_RUSER_32_63 0x301110
#define HZIP_SGL_RUSER_32_63 0x30111c
#define HZIP_DATA_RUSER_32_63 0x301128
#define HZIP_DATA_WUSER_32_63 0x301134
#define HZIP_BD_WUSER_32_63 0x301140
LIST_HEAD(hisi_zip_list);
DEFINE_MUTEX(hisi_zip_list_lock);
/* PCI ID of the HiSilicon ZIP accelerator function. */
static const struct pci_device_id hisi_zip_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa250) },
{ 0, }
};
/* Append a probed ZIP device to the global device list (thread-safe). */
static inline void hisi_zip_add_to_list(struct hisi_zip *hisi_zip)
{
	mutex_lock(&hisi_zip_list_lock);
	list_add_tail(&hisi_zip->list, &hisi_zip_list);
	mutex_unlock(&hisi_zip_list_lock);
}
/* Unlink a departing ZIP device from the global device list. */
static inline void hisi_zip_remove_from_list(struct hisi_zip *hisi_zip)
{
	mutex_lock(&hisi_zip_list_lock);
	list_del(&hisi_zip->list);
	mutex_unlock(&hisi_zip_list_lock);
}
/*
 * One-time PF-side initialization of the ZIP QM user domain, AXI cache
 * attributes and engine-side user/cache/clock-gating registers.  The
 * write ordering follows the hardware bring-up sequence.
 */
static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
{
u32 val;
/* qm user domain */
writel(0x40001070, hisi_zip->qm.io_base + QM_ARUSER_M_CFG_1);
writel(0xfffffffe, hisi_zip->qm.io_base + QM_ARUSER_M_CFG_ENABLE);
writel(0x40001070, hisi_zip->qm.io_base + QM_AWUSER_M_CFG_1);
writel(0xfffffffe, hisi_zip->qm.io_base + QM_AWUSER_M_CFG_ENABLE);
writel(0xffffffff, hisi_zip->qm.io_base + QM_WUSER_M_CFG_ENABLE);
/* set PEH AXUSER bit 11 — NOTE(review): bit meaning comes from the HW
 * spec; confirm it selects the intended PEH attribute */
val = readl(hisi_zip->qm.io_base + QM_PEH_AXUSER_CFG);
val |= (1 << 11);
writel(val, hisi_zip->qm.io_base + QM_PEH_AXUSER_CFG);
/* qm cache */
writel(0xffff, hisi_zip->qm.io_base + QM_AXI_M_CFG);
writel(0xffffffff, hisi_zip->qm.io_base + QM_AXI_M_CFG_ENABLE);
writel(0xffffffff, hisi_zip->qm.io_base + QM_PEH_AXUSER_CFG_ENABLE);
/* cache */
writel(0xffffffff, hisi_zip->qm.io_base + HZIP_PORT_ARCA_CHE_0);
writel(0xffffffff, hisi_zip->qm.io_base + HZIP_PORT_ARCA_CHE_1);
writel(0xffffffff, hisi_zip->qm.io_base + HZIP_PORT_AWCA_CHE_0);
writel(0xffffffff, hisi_zip->qm.io_base + HZIP_PORT_AWCA_CHE_1);
/* user domain configurations */
writel(0x40001070, hisi_zip->qm.io_base + HZIP_BD_RUSER_32_63);
writel(0x40001070, hisi_zip->qm.io_base + HZIP_SGL_RUSER_32_63);
/* data-path user fields differ when shared virtual addressing is on */
#ifdef CONFIG_IOMMU_SVA
writel(0x40001071, hisi_zip->qm.io_base + HZIP_DATA_RUSER_32_63);
writel(0x40001071, hisi_zip->qm.io_base + HZIP_DATA_WUSER_32_63);
#else
writel(0x40001070, hisi_zip->qm.io_base + HZIP_DATA_RUSER_32_63);
writel(0x40001070, hisi_zip->qm.io_base + HZIP_DATA_WUSER_32_63);
#endif
writel(0x40001070, hisi_zip->qm.io_base + HZIP_BD_WUSER_32_63);
/* fsm count */
writel(0xfffffff, hisi_zip->qm.io_base + HZIP_FSM_MAX_CNT);
/* clock gating, core, decompress verify enable */
writel(0x10005, hisi_zip->qm.io_base + 0x301004);
}
/*
 * PCI probe for the ZIP accelerator: allocate device state, initialize
 * the common QM, perform PF-only hardware setup and start the QM.
 *
 * Fixes vs. original:
 *  - hisi_zip is devm_kzalloc()-managed; the explicit kfree() on the
 *    error path caused a double free when devres released it again.
 *  - the device is now added to the global lookup list only after it is
 *    fully initialized, so find_zip_device() cannot hand out a
 *    half-constructed device.
 *  - rev_id is zero-initialized in case the config read fails.
 */
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_zip *hisi_zip;
	struct qm_info *qm;
	int ret;
	u8 rev_id = 0;

	hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
	if (!hisi_zip)
		return -ENOMEM;

	pci_set_drvdata(pdev, hisi_zip);

	qm = &hisi_zip->qm;
	qm->pdev = pdev;
	/* hardware revision selects the QM programming model */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == 0x20)
		qm->ver = QM_HW_V1;
	else if (rev_id == 0x21)
		qm->ver = QM_HW_V2;
	qm->sqe_size = HZIP_SQE_SIZE;

	ret = hisi_qm_init(qm, ZIP);
	if (ret)
		return ret;

	if (pdev->is_physfn) {
		ret = hisi_qm_mem_start(qm);
		if (ret) {
			dev_err(&pdev->dev, "Can't start QM mem of Hisilicon!\n");
			goto err_with_qm_init;
		}
		hisi_zip_set_user_domain_and_cache(hisi_zip);
		qm->qp_base = HZIP_PF_DEF_Q_BASE;
		qm->qp_num = HZIP_PF_DEF_Q_NUM;
		qm->free_qp = qm->qp_num;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		dev_err(&pdev->dev, "Can't start QM of Hisilicon!\n");
		goto err_with_qm_init;
	}

	/* publish only once the device is fully usable */
	hisi_zip_add_to_list(hisi_zip);
	return 0;

err_with_qm_init:
	hisi_qm_uninit(qm);
	return ret;
}
/*
 * PCI remove: stop the QM, tear down its resources and drop the device
 * from the global list.
 *
 * Fix: hisi_zip was allocated with devm_kzalloc(), so the explicit
 * kfree() here was a double free — devres frees it when the device goes
 * away.
 */
static void hisi_zip_remove(struct pci_dev *pdev)
{
	struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
	struct qm_info *qm = &hisi_zip->qm;

	hisi_qm_stop(qm);
	hisi_qm_uninit(qm);
	hisi_zip_remove_from_list(hisi_zip);
}
/* PCI driver glue for the ZIP accelerator (no SR-IOV callback yet). */
static struct pci_driver hisi_zip_pci_driver = {
.name = "hisi_zip",
.id_table = hisi_zip_dev_ids,
.probe = hisi_zip_probe,
.remove = hisi_zip_remove,
};
/*
 * Module init: register the PCI driver first, then expose the ZIP
 * algorithms to the crypto API; roll back the PCI driver if the crypto
 * registration fails.
 */
static int __init hisi_zip_init(void)
{
	int ret;

	ret = pci_register_driver(&hisi_zip_pci_driver);
	if (ret < 0) {
		pr_err("zip: can't register hisi zip driver.\n");
		return ret;
	}

	ret = hisi_zip_register_to_crypto();
	if (ret >= 0)
		return 0;

	pr_err("zip: can't register hisi zip to crypto.\n");
	pci_unregister_driver(&hisi_zip_pci_driver);
	return ret;
}
/* Module exit: tear down in reverse order of registration. */
static void __exit hisi_zip_exit(void)
{
	hisi_zip_unregister_from_crypto();
	pci_unregister_driver(&hisi_zip_pci_driver);
}
module_init(hisi_zip_init);
module_exit(hisi_zip_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator");
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册