diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 13323baf393e85ef69c0788e17c03e75e81547fc..37c52960084747148edfbbdd1e76e8a7824c2945 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -75,6 +75,11 @@
 #define HPRE_BD_USR_MASK		GENMASK(1, 0)
 #define HPRE_CLUSTER_CORE_MASK_V2	GENMASK(3, 0)
 #define HPRE_CLUSTER_CORE_MASK_V3	GENMASK(7, 0)
+#define HPRE_PREFETCH_CFG		0x301130
+#define HPRE_SVA_PREFTCH_DFX		0x30115C
+#define HPRE_PREFETCH_ENABLE		(~(BIT(0) | BIT(30)))
+#define HPRE_PREFETCH_DISABLE		BIT(30)
+#define HPRE_SVA_DISABLE_READY		(BIT(4) | BIT(8))
 
 #define HPRE_AM_OOO_SHUTDOWN_ENB	0x301044
 #define HPRE_AM_OOO_SHUTDOWN_ENABLE	BIT(0)
@@ -370,6 +375,47 @@ static void disable_flr_of_bme(struct hisi_qm *qm)
 	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
 }
 
+static void hpre_open_sva_prefetch(struct hisi_qm *qm)
+{
+	u32 val;
+	int ret;
+
+	if (qm->ver < QM_HW_V3)
+		return;
+
+	/* Enable prefetch */
+	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
+	val &= HPRE_PREFETCH_ENABLE;
+	writel(val, qm->io_base + HPRE_PREFETCH_CFG);
+
+	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
+					 val, !(val & HPRE_PREFETCH_DISABLE),
+					 HPRE_REG_RD_INTVRL_US,
+					 HPRE_REG_RD_TMOUT_US);
+	if (ret)
+		pci_err(qm->pdev, "failed to open sva prefetch\n");
+}
+
+static void hpre_close_sva_prefetch(struct hisi_qm *qm)
+{
+	u32 val;
+	int ret;
+
+	if (qm->ver < QM_HW_V3)
+		return;
+
+	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
+	val |= HPRE_PREFETCH_DISABLE;
+	writel(val, qm->io_base + HPRE_PREFETCH_CFG);
+
+	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
+					 val, !(val & HPRE_SVA_DISABLE_READY),
+					 HPRE_REG_RD_INTVRL_US,
+					 HPRE_REG_RD_TMOUT_US);
+	if (ret)
+		pci_err(qm->pdev, "failed to close sva prefetch\n");
+}
+
 static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
@@ -876,6 +922,8 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
 	.clear_dev_hw_err_status	= hpre_clear_hw_err_status,
 	.log_dev_hw_err			= hpre_log_hw_error,
 	.open_axi_master_ooo		= hpre_open_axi_master_ooo,
+	.open_sva_prefetch		= hpre_open_sva_prefetch,
+	.close_sva_prefetch		= hpre_close_sva_prefetch,
 	.err_info_init			= hpre_err_info_init,
 };
 
@@ -888,6 +936,8 @@ static int hpre_pf_probe_init(struct hpre *hpre)
 	if (ret)
 		return ret;
 
+	hpre_open_sva_prefetch(qm);
+
 	qm->err_ini = &hpre_err_ini;
 	qm->err_ini->err_info_init(qm);
 	hisi_qm_dev_err_init(qm);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index a7cd314073c2468ba7428b25239cadb90f87e920..fe35ea949a5bbf714981397928068df4b37109d1 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -95,6 +95,7 @@
 #define QM_DOORBELL_SQ_CQ_BASE_V2	0x1000
 #define QM_DOORBELL_EQ_AEQ_BASE_V2	0x2000
 #define QM_QUE_ISO_CFG_V		0x0030
+#define QM_PAGE_SIZE			0x0034
 #define QM_QUE_ISO_EN			0x100154
 #define QM_CAPBILITY			0x100158
 #define QM_QP_NUN_MASK			GENMASK(10, 0)
@@ -796,6 +797,32 @@ static void qm_init_qp_status(struct hisi_qp *qp)
 	atomic_set(&qp_status->used, 0);
 }
 
+static void qm_init_prefetch(struct hisi_qm *qm)
+{
+	struct device *dev = &qm->pdev->dev;
+	u32 page_type = 0x0;
+
+	if (qm->ver < QM_HW_V3)
+		return;
+
+	switch (PAGE_SIZE) {
+	case SZ_4K:
+		page_type = 0x0;
+		break;
+	case SZ_16K:
+		page_type = 0x1;
+		break;
+	case SZ_64K:
+		page_type = 0x2;
+		break;
+	default:
+		dev_err(dev, "system page size is not support: %lu, default set to 4KB",
+			PAGE_SIZE);
+	}
+
+	writel(page_type, qm->io_base + QM_PAGE_SIZE);
+}
+
 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
 			    u32 number)
 {
@@ -2974,6 +3001,8 @@ static int __hisi_qm_start(struct hisi_qm *qm)
 	if (ret)
 		return ret;
 
+	qm_init_prefetch(qm);
+
 	writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
 	writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
 
@@ -3898,6 +3927,9 @@ static int qm_soft_reset(struct hisi_qm *qm)
 		return ret;
 	}
 
+	if (qm->err_ini->close_sva_prefetch)
+		qm->err_ini->close_sva_prefetch(qm);
+
 	ret = qm_set_pf_mse(qm, false);
 	if (ret) {
 		pci_err(pdev, "Fails to disable pf MSE bit.\n");
@@ -3967,6 +3999,9 @@ static void qm_restart_prepare(struct hisi_qm *qm)
 {
 	u32 value;
 
+	if (qm->err_ini->open_sva_prefetch)
+		qm->err_ini->open_sva_prefetch(qm);
+
 	if (qm->ver >= QM_HW_V3)
 		return;
 
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index acefdf8b3a50e7f5718c113cbe5efa4ab3a0cb73..9048aa6e5f8abf4cb3a0e1c8b98a2d6c01ba0a64 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -188,6 +188,8 @@ struct hisi_qm_err_ini {
 	void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
 	void (*open_axi_master_ooo)(struct hisi_qm *qm);
 	void (*close_axi_master_ooo)(struct hisi_qm *qm);
+	void (*open_sva_prefetch)(struct hisi_qm *qm);
+	void (*close_sva_prefetch)(struct hisi_qm *qm);
 	void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
 	void (*err_info_init)(struct hisi_qm *qm);
 };
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 6a4408ea18c1c8fd52d02ad8b8440887b1d17529..8ab4e67b8a417bff17723b16258c51bb6603013d 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -85,6 +85,12 @@
 #define SEC_USER1_SMMU_MASK		(~SEC_USER1_SVA_SET)
 #define SEC_CORE_INT_STATUS_M_ECC	BIT(2)
 
+#define SEC_PREFETCH_CFG		0x301130
+#define SEC_SVA_TRANS			0x301EC4
+#define SEC_PREFETCH_ENABLE		(~(BIT(0) | BIT(1) | BIT(11)))
+#define SEC_PREFETCH_DISABLE		BIT(1)
+#define SEC_SVA_DISABLE_READY		(BIT(7) | BIT(11))
+
 #define SEC_DELAY_10_US			10
 #define SEC_POLL_TIMEOUT_US		1000
 #define SEC_DBGFS_VAL_MAX_LEN		20
@@ -332,6 +338,42 @@ static u8 sec_get_endian(struct hisi_qm *qm)
 	return SEC_64BE;
 }
 
+static void sec_open_sva_prefetch(struct hisi_qm *qm)
+{
+	u32 val;
+	int ret;
+
+	if (qm->ver < QM_HW_V3)
+		return;
+
+	/* Enable prefetch */
+	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+	val &= SEC_PREFETCH_ENABLE;
+	writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
+					 val, !(val & SEC_PREFETCH_DISABLE),
+					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+	if (ret)
+		pci_err(qm->pdev, "failed to open sva prefetch\n");
+}
+
+static void sec_close_sva_prefetch(struct hisi_qm *qm)
+{
+	u32 val;
+	int ret;
+
+	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+	val |= SEC_PREFETCH_DISABLE;
+	writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
+					 val, !(val & SEC_SVA_DISABLE_READY),
+					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+	if (ret)
+		pci_err(qm->pdev, "failed to close sva prefetch\n");
+}
+
 static int sec_engine_init(struct hisi_qm *qm)
 {
 	int ret;
@@ -751,6 +793,8 @@ static const struct hisi_qm_err_ini sec_err_ini = {
 	.clear_dev_hw_err_status = sec_clear_hw_err_status,
 	.log_dev_hw_err		= sec_log_hw_error,
 	.open_axi_master_ooo	= sec_open_axi_master_ooo,
+	.open_sva_prefetch	= sec_open_sva_prefetch,
+	.close_sva_prefetch	= sec_close_sva_prefetch,
 	.err_info_init		= sec_err_info_init,
 };
 
@@ -766,6 +810,7 @@ static int sec_pf_probe_init(struct sec_dev *sec)
 	if (ret)
 		return ret;
 
+	sec_open_sva_prefetch(qm);
 	hisi_qm_dev_err_init(qm);
 	sec_debug_regs_clear(qm);
 
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 3e23f2a1cf5a523a02121c30973d46db42f94885..9e4c49cd6f3abee84747996e8afd028c4d4019af 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -97,6 +97,14 @@
 #define HZIP_RD_CNT_CLR_CE_EN		(HZIP_CNT_CLR_CE_EN | \
 					 HZIP_RO_CNT_CLR_CE_EN)
 
+#define HZIP_PREFETCH_CFG		0x3011B0
+#define HZIP_SVA_TRANS			0x3011C4
+#define HZIP_PREFETCH_ENABLE		(~(BIT(26) | BIT(17) | BIT(0)))
+#define HZIP_SVA_PREFETCH_DISABLE	BIT(26)
+#define HZIP_SVA_DISABLE_READY		(BIT(26) | BIT(30))
+#define HZIP_DELAY_1_US			1
+#define HZIP_POLL_TIMEOUT_US		1000
+
 static const char hisi_zip_name[] = "hisi_zip";
 static struct dentry *hzip_debugfs_root;
 
@@ -263,6 +271,45 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
 	return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
 }
 
+static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
+{
+	u32 val;
+	int ret;
+
+	if (qm->ver < QM_HW_V3)
+		return;
+
+	/* Enable prefetch */
+	val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
+	val &= HZIP_PREFETCH_ENABLE;
+	writel(val, qm->io_base + HZIP_PREFETCH_CFG);
+
+	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
+					 val, !(val & HZIP_SVA_PREFETCH_DISABLE),
+					 HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
+	if (ret)
+		pci_err(qm->pdev, "failed to open sva prefetch\n");
+}
+
+static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
+{
+	u32 val;
+	int ret;
+
+	if (qm->ver < QM_HW_V3)
+		return;
+
+	val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
+	val |= HZIP_SVA_PREFETCH_DISABLE;
+	writel(val, qm->io_base + HZIP_PREFETCH_CFG);
+
+	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
+					 val, !(val & HZIP_SVA_DISABLE_READY),
+					 HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
+	if (ret)
+		pci_err(qm->pdev, "failed to close sva prefetch\n");
+}
+
 static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
 {
 	void __iomem *base = qm->io_base;
@@ -696,6 +743,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
 	.log_dev_hw_err		= hisi_zip_log_hw_error,
 	.open_axi_master_ooo	= hisi_zip_open_axi_master_ooo,
 	.close_axi_master_ooo	= hisi_zip_close_axi_master_ooo,
+	.open_sva_prefetch	= hisi_zip_open_sva_prefetch,
+	.close_sva_prefetch	= hisi_zip_close_sva_prefetch,
 	.err_info_init		= hisi_zip_err_info_init,
 };
 
@@ -714,6 +763,7 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
 	qm->err_ini->err_info_init(qm);
 
 	hisi_zip_set_user_domain_and_cache(qm);
+	hisi_zip_open_sva_prefetch(qm);
 	hisi_qm_dev_err_init(qm);
 	hisi_zip_debug_regs_clear(qm);
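
For reference, the sketch below is a minimal user-space C mock (not kernel code and not part of the patch) of the open/close handshake that hpre, sec and zip now share: opening clears the prefetch-disable bit in the prefetch configuration register and polls until the hardware acknowledges, while closing sets the disable bit and polls a status register until the "disable ready" bits read back as zero. The mock_* names, register layout and bit positions are made up for illustration; only the control flow of the readl_relaxed/writel/readl_relaxed_poll_timeout sequence added above is mirrored.

/*
 * Standalone user-space illustration of the SVA prefetch open/close handshake
 * implemented by the patch above. Everything prefixed with "mock_" is a
 * hypothetical stand-in for the MMIO accessors and device registers; only the
 * control flow (clear/set a disable bit, then poll with a bounded retry loop)
 * mirrors the driver code.
 */
#include <stdint.h>
#include <stdio.h>

#define MOCK_PREFETCH_DISABLE	(1U << 30)		/* stand-in for *_PREFETCH_DISABLE */
#define MOCK_SVA_DISABLE_READY	((1U << 4) | (1U << 8))	/* stand-in for *_SVA_DISABLE_READY */
#define MOCK_POLL_RETRIES	1000			/* stand-in for the poll timeout */

struct mock_dev {
	uint32_t prefetch_cfg;	/* prefetch configuration register */
	uint32_t sva_status;	/* SVA transaction / DFX status register */
};

/* Re-read @reg until (*reg & mask) == 0, like readl_relaxed_poll_timeout(). */
static int mock_poll_clear(const volatile uint32_t *reg, uint32_t mask)
{
	for (int i = 0; i < MOCK_POLL_RETRIES; i++) {
		if (!(*reg & mask))
			return 0;
	}
	return -1;	/* timed out */
}

static void mock_open_sva_prefetch(struct mock_dev *dev)
{
	/* Enable prefetch: clear the disable bit, then wait for the ack. */
	dev->prefetch_cfg &= ~MOCK_PREFETCH_DISABLE;
	if (mock_poll_clear(&dev->prefetch_cfg, MOCK_PREFETCH_DISABLE))
		fprintf(stderr, "failed to open sva prefetch\n");
}

static void mock_close_sva_prefetch(struct mock_dev *dev)
{
	/* Disable prefetch, then wait for outstanding SVA transactions to drain. */
	dev->prefetch_cfg |= MOCK_PREFETCH_DISABLE;

	/* A real device clears these bits itself as it drains; done by hand here. */
	dev->sva_status &= ~MOCK_SVA_DISABLE_READY;

	if (mock_poll_clear(&dev->sva_status, MOCK_SVA_DISABLE_READY))
		fprintf(stderr, "failed to close sva prefetch\n");
}

int main(void)
{
	struct mock_dev dev = {
		.prefetch_cfg = MOCK_PREFETCH_DISABLE,	/* prefetch starts disabled */
		.sva_status = MOCK_SVA_DISABLE_READY,	/* pretend transactions are in flight */
	};

	mock_open_sva_prefetch(&dev);
	mock_close_sva_prefetch(&dev);
	printf("cfg=0x%x status=0x%x\n",
	       (unsigned int)dev.prefetch_cfg, (unsigned int)dev.sva_status);
	return 0;
}

As in the drivers, a poll timeout is only logged: the open/close helpers return void, so probe and reset paths continue whether or not the prefetch handshake completes.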