From 90b9620b26d51134e3aa660963907d887aa0bd59 Mon Sep 17 00:00:00 2001
From: Xiaoxu Zeng
Date: Thu, 12 Oct 2023 10:49:01 +0800
Subject: [PATCH] The HiSilicon QM is a Queue Management module.

In order to unify the interface between the accelerator and software, a
unified queue management module (QM) is used to interact with software.
Each accelerator module integrates a QM. Software issues tasks to the
SQ (Submission Queue), and the QM obtains the address of the SQE
(Submission Queue Element). The BD (Buffer Descriptor, same as the SQE)
information is sent to the accelerator. After the task processing is
complete, the accelerator applies for a write-back address from the QM
to write back the SQE.

Signed-off-by: Xiaoxu Zeng
---
 core/arch/arm/plat-d06/conf.mk | 1 +
 core/drivers/crypto/hisilicon/hisi_qm.c | 791 ++++++++++++++++++
 .../crypto/hisilicon/include/hisi_qm.h | 260 ++++++
 core/drivers/crypto/hisilicon/sub.mk | 3 +
 core/drivers/crypto/sub.mk | 2 +
 5 files changed, 1057 insertions(+)
 create mode 100644 core/drivers/crypto/hisilicon/hisi_qm.c
 create mode 100644 core/drivers/crypto/hisilicon/include/hisi_qm.h
 create mode 100644 core/drivers/crypto/hisilicon/sub.mk

diff --git a/core/arch/arm/plat-d06/conf.mk b/core/arch/arm/plat-d06/conf.mk
index 98a8fa01bbe..fc3157cc0cf 100644
--- a/core/arch/arm/plat-d06/conf.mk
+++ b/core/arch/arm/plat-d06/conf.mk
@@ -8,6 +8,7 @@ CFG_WITH_PAGER ?= n
 CFG_WITH_SOFTWARE_PRNG ?= y
 CFG_WITH_STATS ?= y
 CFG_TEE_CORE_EMBED_INTERNAL_TESTS ?= y
+CFG_HISILICON_CRYPTO_DRIVER ?= y
 $(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
 $(call force,CFG_WITH_ARM_TRUSTED_FW,y)
diff --git a/core/drivers/crypto/hisilicon/hisi_qm.c b/core/drivers/crypto/hisilicon/hisi_qm.c
new file mode 100644
index 00000000000..6e1744d15e5
--- /dev/null
+++ b/core/drivers/crypto/hisilicon/hisi_qm.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2022-2023 HiSilicon Limited.
+ * Kunpeng hardware accelerator queue management module.
+ */ +#include "hisi_qm.h" + +#define QM_FVT_CFG_RDY_BIT 0x1 +/* doorbell */ +#define QM_DOORBELL_SQ_CQ_BASE 0x1000 +#define QM_DB_CMD_SHIFT 12 +#define QM_DB_RAND_DATA_SHIFT 16 +#define QM_DB_INDEX_SHIFT 32 +#define QM_DB_PRIORITY_SHIFT 48 +#define QM_DB_RAND_DATA 0x5a +#define QM_DOORBELL_CMD_SQ 0 +#define QM_DOORBELL_CMD_CQ 1 +/* mailbox */ +#define QM_MAILBOX_BASE 0x300 +#define QM_MAILBOX_DATA_ADDR_L 0x304 +#define QM_MAILBOX_DATA_ADDR_H 0x308 +#define QM_MB_BUSY_SHIFT 13 +#define QM_MB_BUSY_BIT BIT32(QM_MB_BUSY_SHIFT) +#define QM_MB_OP_SHIFT 14 +#define QM_MB_OP_WR 0 +#define QM_MB_OP_RD 1 +/* XQC_VFT */ +#define QM_VFT_CFG_OP_ENABLE 0x100054 +#define QM_VFT_CFG_OP_WR 0x100058 +#define QM_VFT_CFG_TYPE 0x10005c +#define QM_VFT_CFG_ADDRESS 0x100060 +#define QM_VFT_CFG_DATA_L 0x100064 +#define QM_VFT_CFG_DATA_H 0x100068 +#define QM_VFT_CFG_RDY 0x10006c +#define QM_SQC_VFT 0 +#define QM_CQC_VFT 1 +#define QM_SQC_VFT_START_SQN_SHIFT 28 +#define QM_SQC_VFT_VALID BIT64(44) +#define QM_SQC_VFT_SQ_NUM_SHIFT 45 +#define QM_CQC_VFT_VALID BIT(28) +#define QM_VFT_WRITE 0 +#define QM_VFT_READ 1 +#define QM_SQC_VFT_BASE_MASK 0x3ff +#define QM_SQC_VFT_NUM_MASK 0x3ff +/* QM INIT */ +#define QM_MEM_START_INIT 0x100040 +#define QM_MEM_INIT_DONE 0x100044 +#define QM_VF_AEQ_INT_MASK 0x4 +#define QM_VF_AEQ_INT_MASK_EN 0x1 +#define QM_VF_EQ_INT_MASK 0xc +#define QM_VF_EQ_INT_MASK_EN 0x1 +#define QM_ARUSER_M_CFG_1 0x100088 +#define QM_ARUSER_M_CFG_ENABLE 0x100090 +#define QM_AWUSER_M_CFG_1 0x100098 +#define QM_AWUSER_M_CFG_ENABLE 0x1000a0 +#define QM_AXUSER_CFG 0x40001070 +#define AXUSER_M_CFG_ENABLE 0x7ffffc +#define QM_AXI_M_CFG 0x1000ac +#define AXI_M_CFG 0xffff +#define QM_PEH_AXUSER_CFG 0x1000cc +#define PEH_AXUSER_CFG 0x400801 +#define QM_CACHE_CTL 0x100050 +#define QM_CACHE_CFG 0x4893 +#define QM_CACHE_WB_START 0x100204 +#define QM_CACHE_WB_DONE 0x100208 +/* XQC shift */ +#define QM_SQ_SQE_SIZE_SHIFT 12 +#define QM_SQ_ORDER_SHIFT 4 +#define QM_SQ_TYPE_SHIFT 8 +#define QM_CQE_SIZE 4 +#define QM_CQ_CQE_SIZE_SHIFT 12 +/* CQE */ +#define QM_CQE_PHASE(cqe) (((cqe)->w7) & QM_FVT_CFG_RDY_BIT) + +enum qm_mailbox_common_cmd { + QM_MB_CMD_SQC = 0x0, + QM_MB_CMD_CQC, + QM_MB_CMD_EQC, + QM_MB_CMD_AEQC, + QM_MB_CMD_SQC_BT, + QM_MB_CMD_CQC_BT, + QM_MB_CMD_SQC_VFT, +}; + +enum qm_mailbox_cmd_v3 { + QM_MB_CM_CLOSE_QM = 0x7, + QM_MB_CMD_CLOSE_QP, + QM_MB_CMD_FLUSH_QM, + QM_MB_CMD_FLUSH_QP, + QM_MB_CMD_SRC = 0xc, + QM_MB_CMD_DST, + QM_MB_CMD_STOP_QM, +}; + +struct qm_mailbox { + union { + struct { + uint16_t w0; + uint16_t queue; + unsigned int base_l; + unsigned int base_h; + unsigned int token; + }; + uint64_t x[2]; + }; +}; + +struct qm_dfx_registers { + const char *reg_name; + unsigned int reg_offset; +}; + +static const struct qm_dfx_registers qm_dfx_regs[] = { + { .reg_name = "QM_ECC_1BIT_CNT ", .reg_offset = 0x104000 }, + { .reg_name = "QM_ECC_MBIT_CNT ", .reg_offset = 0x104008 }, + { .reg_name = "QM_DFX_MB_CNT ", .reg_offset = 0x104018 }, + { .reg_name = "QM_DFX_DB_CNT ", .reg_offset = 0x104028 }, + { .reg_name = "QM_DFX_SQE_CNT ", .reg_offset = 0x104038 }, + { .reg_name = "QM_DFX_CQE_CNT ", .reg_offset = 0x104048 }, + { .reg_name = "QM_DFX_SEND_SQE_TO_ACC_CNT", .reg_offset = 0x104050 }, + { .reg_name = "QM_DFX_WB_SQE_FROM_ACC_CNT", .reg_offset = 0x104058 }, + { .reg_name = "QM_DFX_ACC_FINISH_CNT ", .reg_offset = 0x104060 }, + { .reg_name = "QM_DFX_CQE_ERR_CNT ", .reg_offset = 0x1040b4 }, + { .reg_name = NULL, 0 } +}; + +void hisi_qm_get_version(struct hisi_qm *qm) +{ + qm->version = io_read32(qm->io_base + 
HISI_QM_REVISON_ID_BASE) & + HISI_QM_REVISON_ID_MASK; +} + +static void qm_db(struct hisi_qm *qm, uint16_t qn, uint8_t cmd, uint16_t index, + uint8_t priority) +{ + uint64_t doorbell = 0; + + doorbell = qn | SHIFT_U64(cmd, QM_DB_CMD_SHIFT) | + SHIFT_U64(QM_DB_RAND_DATA, QM_DB_RAND_DATA_SHIFT) | + SHIFT_U64(index, QM_DB_INDEX_SHIFT) | + SHIFT_U64(priority, QM_DB_PRIORITY_SHIFT); + + io_write64(qm->io_base + QM_DOORBELL_SQ_CQ_BASE, doorbell); +} + +static enum hisi_drv_status qm_wait_mb_ready(struct hisi_qm *qm) +{ + unsigned int val = 0; + + /* return 0 mailbox ready, HISI_QM_DRVCRYPT_ETMOUT hardware timeout */ + if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_MAILBOX_BASE, val, + !(val & QM_MB_BUSY_BIT), POLL_PERIOD, + POLL_TIMEOUT)) { + return HISI_QM_DRVCRYPT_ETMOUT; + } + + return HISI_QM_DRVCRYPT_NO_ERR; +} + +static void qm_mb_write(struct hisi_qm *qm, struct qm_mailbox *mb) +{ + vaddr_t dst = qm->io_base + QM_MAILBOX_BASE; + + write_64bit_pair(dst, mb->x[0], mb->x[1]); +} + +static enum hisi_drv_status qm_mb(struct hisi_qm *qm, uint8_t cmd, + vaddr_t dma_addr, uint16_t qn, uint8_t op) +{ + struct qm_mailbox mb = { }; + + mb.w0 = cmd | SHIFT_U32(op, QM_MB_OP_SHIFT) | + BIT32(QM_MB_BUSY_SHIFT); + mb.queue = qn; + reg_pair_from_64(dma_addr, &mb.base_h, &mb.base_l); + mb.token = 0; + + if (qm_wait_mb_ready(qm)) { + EMSG("QM mailbox is busy"); + return HISI_QM_DRVCRYPT_EBUSY; + } + + qm_mb_write(qm, &mb); + + if (qm_wait_mb_ready(qm)) { + EMSG("QM mailbox operation timeout"); + return HISI_QM_DRVCRYPT_EBUSY; + } + + return HISI_QM_DRVCRYPT_NO_ERR; +} + +static void qm_cfg_vft_data(struct hisi_qm *qm, uint8_t vft_type, + unsigned int base, unsigned int number) +{ + unsigned int data_h = 0; + unsigned int data_l = 0; + uint64_t data = 0; + + switch (vft_type) { + case QM_SQC_VFT: + data = SHIFT_U64(base, QM_SQC_VFT_START_SQN_SHIFT) | + QM_SQC_VFT_VALID | + SHIFT_U64((number - 1), QM_SQC_VFT_SQ_NUM_SHIFT); + break; + case QM_CQC_VFT: + data = QM_CQC_VFT_VALID; + break; + default: + panic("Invalid vft type"); + } + + reg_pair_from_64(data, &data_h, &data_l); + io_write32(qm->io_base + QM_VFT_CFG_DATA_L, data_l); + io_write32(qm->io_base + QM_VFT_CFG_DATA_H, data_h); +} + +static enum hisi_drv_status qm_set_vft_common(struct hisi_qm *qm, + uint8_t vft_type, + unsigned int function, + unsigned int base, + unsigned int num) +{ + unsigned int val = 0; + + if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_VFT_CFG_RDY, val, + val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD, + POLL_TIMEOUT)) { + EMSG("QM VFT is not ready"); + return HISI_QM_DRVCRYPT_EBUSY; + } + + io_write32(qm->io_base + QM_VFT_CFG_OP_WR, QM_VFT_WRITE); + io_write32(qm->io_base + QM_VFT_CFG_TYPE, vft_type); + io_write32(qm->io_base + QM_VFT_CFG_ADDRESS, function); + qm_cfg_vft_data(qm, vft_type, base, num); + io_write32(qm->io_base + QM_VFT_CFG_RDY, 0x0); + io_write32(qm->io_base + QM_VFT_CFG_OP_ENABLE, QM_FVT_CFG_RDY_BIT); + + if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_VFT_CFG_RDY, val, + val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD, + POLL_TIMEOUT)) { + EMSG("QM VFT is not ready"); + return HISI_QM_DRVCRYPT_EBUSY; + } + + return HISI_QM_DRVCRYPT_NO_ERR; +} + +static enum hisi_drv_status qm_set_xqc_vft(struct hisi_qm *qm, + unsigned int function, + unsigned int base, unsigned int num) +{ + enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR; + int i = 0; + + if (!num) { + EMSG("Invalid sq num"); + return HISI_QM_DRVCRYPT_EINVAL; + } + + for (i = QM_SQC_VFT; i <= QM_CQC_VFT; i++) { + ret = qm_set_vft_common(qm, i, function, base, num); + if (ret) { + 
EMSG("QM set type%d fail", i); + return ret; + } + } + + return HISI_QM_DRVCRYPT_NO_ERR; +} + +static enum hisi_drv_status qm_get_vft(struct hisi_qm *qm, unsigned int *base, + unsigned int *num) +{ + enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR; + uint64_t sqc_vft = 0; + + ret = qm_mb(qm, QM_MB_CMD_SQC_VFT, 0, 0, QM_MB_OP_RD); + if (ret) + return ret; + + sqc_vft = io_read64(qm->io_base + QM_MAILBOX_DATA_ADDR_L); + *base = (sqc_vft >> QM_SQC_VFT_START_SQN_SHIFT) & QM_SQC_VFT_BASE_MASK; + *num = ((sqc_vft >> QM_SQC_VFT_SQ_NUM_SHIFT) & QM_SQC_VFT_NUM_MASK) + 1; + + return HISI_QM_DRVCRYPT_NO_ERR; +} + +static void qp_memory_uninit(struct hisi_qm *qm, unsigned int id) +{ + struct hisi_qp *qp = &qm->qp_array[id]; + + free(qp->sqe); + free(qp->cqe); +} + +static enum hisi_drv_status qp_memory_init(struct hisi_qm *qm, unsigned int id) +{ + size_t sq_size = qm->sqe_size * HISI_QM_Q_DEPTH; + size_t cq_size = sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH; + struct hisi_qp *qp = &qm->qp_array[id]; + enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR; + + qp->sqe = memalign(HISI_QM_ALIGN128, sq_size); + if (!qp->sqe) { + EMSG("Fail to malloc sq[%"PRIu32"]", id); + return HISI_QM_DRVCRYPT_ENOMEM; + } + qp->sqe_dma = virt_to_phys(qp->sqe); + qp->cqe = memalign(HISI_QM_ALIGN32, cq_size); + if (!qp->cqe) { + EMSG("Fail to malloc cq[%"PRIu32"]", id); + ret = HISI_QM_DRVCRYPT_ENOMEM; + goto free_sqe; + } + qp->cqe_dma = virt_to_phys(qp->cqe); + + qp->qp_id = id; + qp->qm = qm; + return HISI_QM_DRVCRYPT_NO_ERR; + +free_sqe: + free(qp->sqe); + return ret; +} + +static void qm_memory_uninit(struct hisi_qm *qm) +{ + unsigned int i = 0; + + for (i = 0; i < qm->qp_num; i++) + qp_memory_uninit(qm, i); + + free(qm->qp_array); + free(qm->sqc); + free(qm->cqc); +} + +static enum hisi_drv_status qm_memory_init(struct hisi_qm *qm) +{ + enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR; + size_t sqc_size = 0; + size_t cqc_size = 0; + unsigned int i = 0; + int j = 0; + + sqc_size = sizeof(struct qm_sqc) * qm->qp_num; + cqc_size = sizeof(struct qm_cqc) * qm->qp_num; + + qm->sqc = memalign(HISI_QM_ALIGN32, sqc_size); + if (!qm->sqc) { + EMSG("Fail to malloc sqc"); + return HISI_QM_DRVCRYPT_ENOMEM; + } + qm->sqc_dma = virt_to_phys(qm->sqc); + + qm->cqc = memalign(HISI_QM_ALIGN32, cqc_size); + if (!qm->cqc) { + EMSG("Fail to malloc cqc"); + ret = HISI_QM_DRVCRYPT_ENOMEM; + goto free_sqc; + } + qm->cqc_dma = virt_to_phys(qm->cqc); + + qm->qp_array = calloc(qm->qp_num, sizeof(struct hisi_qp)); + if (!qm->qp_array) { + EMSG("Fail to malloc qp_array"); + ret = HISI_QM_DRVCRYPT_ENOMEM; + goto free_cqc; + } + + for (i = 0; i < qm->qp_num; i++) { + ret = qp_memory_init(qm, i); + if (ret) { + ret = HISI_QM_DRVCRYPT_ENOMEM; + goto free_qp_mem; + } + } + + return HISI_QM_DRVCRYPT_NO_ERR; + +free_qp_mem: + for (j = (int)i - 1; j >= 0; j--) + qp_memory_uninit(qm, j); + free(qm->qp_array); +free_cqc: + free(qm->cqc); +free_sqc: + free(qm->sqc); + return ret; +} + +enum hisi_drv_status hisi_qm_init(struct hisi_qm *qm) +{ + enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR; + + if (qm->fun_type == HISI_QM_HW_VF) { + ret = qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + if (ret) { + EMSG("Fail to get function vft config"); + return ret; + } + } + + if (qm->qp_num == 0 || qm->sqe_size == 0) { + EMSG("Invalid qm parameters"); + return HISI_QM_DRVCRYPT_EINVAL; + } + + ret = qm_memory_init(qm); + if (ret) + return ret; + + qm->qp_in_used = 0; + qm->qp_idx = 0; + mutex_init(&qm->qp_lock); + + return HISI_QM_DRVCRYPT_NO_ERR; +} + +static void 
qm_cache_writeback(struct hisi_qm *qm) +{ + unsigned int val = 0; + + io_write32(qm->io_base + QM_CACHE_WB_START, QM_FVT_CFG_RDY_BIT); + + if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_CACHE_WB_DONE, val, + val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD, + POLL_TIMEOUT)) + panic("QM writeback sqc cache fail"); +} + +void hisi_qm_uninit(struct hisi_qm *qm) +{ + qm_cache_writeback(qm); + qm_memory_uninit(qm); + mutex_destroy(&qm->qp_lock); +} + +static enum hisi_drv_status qm_hw_mem_reset(struct hisi_qm *qm) +{ + unsigned int val = 0; + + io_write32(qm->io_base + QM_MEM_START_INIT, QM_FVT_CFG_RDY_BIT); + + if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_MEM_INIT_DONE, val, + val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD, + POLL_TIMEOUT)) + return HISI_QM_DRVCRYPT_EBUSY; + + return HISI_QM_DRVCRYPT_NO_ERR; +} + +static enum hisi_drv_status qm_func_vft_cfg(struct hisi_qm *qm) +{ + enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR; + unsigned int q_base = qm->qp_num; + unsigned int act_q_num = 0; + unsigned int i = 0; + unsigned int j = 0; + + if (qm->vfs_num == 0) + return HISI_QM_DRVCRYPT_NO_ERR; + + if (qm->vfs_num > HISI_QM_MAX_VFS_NUM) { + EMSG("Invalid QM vfs_num"); + return HISI_QM_DRVCRYPT_EINVAL; + } + + for (i = 1; i <= qm->vfs_num; i++) { + act_q_num = HISI_QM_VF_Q_NUM; + ret = qm_set_xqc_vft(qm, i, q_base, act_q_num); + if (ret) { + for (j = 1; j < i; j++) + (void)qm_set_xqc_vft(qm, j, 0, 0); + return ret; + } + q_base += act_q_num; + } + + return HISI_QM_DRVCRYPT_NO_ERR; +} + +enum hisi_drv_status hisi_qm_start(struct hisi_qm *qm) +{ + enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR; + + if (qm->fun_type == HISI_QM_HW_PF) { + ret = qm_hw_mem_reset(qm); + if (ret) { + EMSG("Fail to reset qm hardware mem"); + return ret; + } + + ret = qm_set_xqc_vft(qm, 0, qm->qp_base, qm->qp_num); + if (ret) { + EMSG("Fail to set PF xqc_vft"); + return ret; + } + + ret = qm_func_vft_cfg(qm); + if (ret) { + EMSG("Fail to set VF xqc_vft"); + return ret; + } + } + + ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, QM_MB_OP_WR); + if (ret) { + EMSG("Fail to set sqc_bt"); + return ret; + } + + ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, QM_MB_OP_WR); + if (ret) { + EMSG("Fail to set cqc_bt"); + return ret; + } + + /* security mode does not support msi */ + io_write32(qm->io_base + QM_VF_AEQ_INT_MASK, QM_VF_AEQ_INT_MASK_EN); + io_write32(qm->io_base + QM_VF_EQ_INT_MASK, QM_VF_EQ_INT_MASK_EN); + + return HISI_QM_DRVCRYPT_NO_ERR; +} + +void hisi_qm_dev_init(struct hisi_qm *qm) +{ + if (qm->fun_type == HISI_QM_HW_VF) + return; + + /* qm user domain */ + io_write32(qm->io_base + QM_ARUSER_M_CFG_1, QM_AXUSER_CFG); + io_write32(qm->io_base + QM_ARUSER_M_CFG_ENABLE, AXUSER_M_CFG_ENABLE); + io_write32(qm->io_base + QM_AWUSER_M_CFG_1, QM_AXUSER_CFG); + io_write32(qm->io_base + QM_AWUSER_M_CFG_ENABLE, AXUSER_M_CFG_ENABLE); + /* qm cache */ + io_write32(qm->io_base + QM_AXI_M_CFG, AXI_M_CFG); + + if (qm->version == HISI_QM_HW_V2) { + /* disable FLR triggered by BME(bus master enable) */ + io_write32(qm->io_base + QM_PEH_AXUSER_CFG, PEH_AXUSER_CFG); + /* set sec sqc and cqc cache wb threshold 4 */ + io_write32(qm->io_base + QM_CACHE_CTL, QM_CACHE_CFG); + } + /* disable qm ras */ + io_write32(qm->io_base + HISI_QM_ABNML_INT_MASK, + HISI_QM_ABNML_INT_MASK_CFG); +} + +static enum hisi_drv_status qm_sqc_cfg(struct hisi_qp *qp) +{ + enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR; + struct hisi_qm *qm = qp->qm; + struct qm_sqc *sqc = NULL; + paddr_t sqc_dma = 0; + + sqc = memalign(HISI_QM_ALIGN32, sizeof(struct qm_sqc)); + if 
(!sqc) + return HISI_QM_DRVCRYPT_ENOMEM; + + sqc_dma = virt_to_phys(sqc); + + memzero_explicit(sqc, sizeof(struct qm_sqc)); + reg_pair_from_64(qp->sqe_dma, &sqc->base_h, &sqc->base_l); + sqc->dw3 = (HISI_QM_Q_DEPTH - 1) | + SHIFT_U32(qm->sqe_log2_size, QM_SQ_SQE_SIZE_SHIFT); + sqc->rand_data = QM_DB_RAND_DATA; + sqc->cq_num = qp->qp_id; + sqc->w13 = BIT32(QM_SQ_ORDER_SHIFT) | + SHIFT_U32(qp->sq_type, QM_SQ_TYPE_SHIFT); + + ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp->qp_id, QM_MB_OP_WR); + free(sqc); + + return ret; +} + +static enum hisi_drv_status qm_cqc_cfg(struct hisi_qp *qp) +{ + enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR; + struct hisi_qm *qm = qp->qm; + struct qm_cqc *cqc = NULL; + paddr_t cqc_dma = 0; + + cqc = memalign(HISI_QM_ALIGN32, sizeof(struct qm_cqc)); + if (!cqc) + return HISI_QM_DRVCRYPT_ENOMEM; + + cqc_dma = virt_to_phys(cqc); + + memzero_explicit(cqc, sizeof(struct qm_cqc)); + reg_pair_from_64(qp->cqe_dma, &cqc->base_h, &cqc->base_l); + cqc->dw3 = (HISI_QM_Q_DEPTH - 1) | + SHIFT_U32(QM_CQE_SIZE, QM_CQ_CQE_SIZE_SHIFT); + cqc->rand_data = QM_DB_RAND_DATA; + cqc->dw6 = PHASE_DEFAULT_VAL; + + ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp->qp_id, QM_MB_OP_WR); + free(cqc); + + return ret; +} + +struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, uint8_t sq_type) +{ + struct hisi_qp *qp = NULL; + + mutex_lock(&qm->qp_lock); + if (qm->qp_in_used == qm->qp_num) { + EMSG("All %"PRIu32" queues of QM are busy", qm->qp_num); + goto err_proc; + } + + if (qm->qp_idx == qm->qp_num - 1) + qm->qp_idx = 0; + else + qm->qp_idx++; + + qp = &qm->qp_array[qm->qp_idx]; + memzero_explicit(qp->cqe, sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH); + qp->sq_type = sq_type; + qp->sq_tail = 0; + qp->cq_head = 0; + qp->cqc_phase = true; + + if (qm_sqc_cfg(qp)) { + EMSG("Fail to set qp[%"PRIu32"] sqc", qp->qp_id); + goto err_proc; + } + + if (qm_cqc_cfg(qp)) { + EMSG("Fail to set qp[%"PRIu32"] cqc", qp->qp_id); + goto err_proc; + } + + qm->qp_in_used++; + mutex_unlock(&qm->qp_lock); + return qp; + +err_proc: + qp->sq_type = 0; + qp->cqc_phase = false; + mutex_unlock(&qm->qp_lock); + return NULL; +} + +void hisi_qm_release_qp(struct hisi_qp *qp) +{ + struct hisi_qm *qm = NULL; + + if (!qp) { + EMSG("qp is NULL"); + return; + } + + qm = qp->qm; + mutex_lock(&qm->qp_lock); + qm->qp_in_used--; + mutex_unlock(&qm->qp_lock); +} + +static void qm_sq_tail_update(struct hisi_qp *qp) +{ + if (qp->sq_tail == HISI_QM_Q_DEPTH - 1) + qp->sq_tail = 0; + else + qp->sq_tail++; +} + +/* + * One task thread will just bind to one hardware queue, and + * hardware does not support msi. So we have no lock here. 
+ */ +enum hisi_drv_status hisi_qp_send(struct hisi_qp *qp, void *msg) +{ + enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR; + struct hisi_qm *qm = NULL; + void *sqe = NULL; + + if (!qp) { + EMSG("qp is NULL"); + return HISI_QM_DRVCRYPT_EINVAL; + } + + qm = qp->qm; + ret = qm->dev_status_check(qm); + if (ret) + return ret; + + sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * qp->sq_tail); + memzero_explicit(sqe, qm->sqe_size); + + ret = qp->fill_sqe(sqe, msg); + if (ret) { + EMSG("Fail to fill sqe"); + return ret; + } + + qm_sq_tail_update(qp); + + dsb(); + qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_SQ, qp->sq_tail, 0); + + return HISI_QM_DRVCRYPT_NO_ERR; +} + +static void qm_cq_head_update(struct hisi_qp *qp) +{ + if (qp->cq_head == HISI_QM_Q_DEPTH - 1) { + qp->cqc_phase = !qp->cqc_phase; + qp->cq_head = 0; + } else { + qp->cq_head++; + } +} + +static enum hisi_drv_status hisi_qp_recv(struct hisi_qp *qp, void *msg) +{ + enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR; + struct hisi_qm *qm = qp->qm; + struct qm_cqe *cqe = NULL; + void *sqe = NULL; + + ret = qm->dev_status_check(qm); + if (ret) + return ret; + + cqe = qp->cqe + qp->cq_head; + if (QM_CQE_PHASE(cqe) == qp->cqc_phase) { + dsb_osh(); + sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * cqe->sq_head); + ret = qp->parse_sqe(sqe, msg); + qm_cq_head_update(qp); + qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->cq_head, 0); + if (ret) { + EMSG("Fail to parse sqe"); + return ret; + } + } else { + return HISI_QM_DRVCRYPT_NO_ERR; + } + + return HISI_QM_DRVCRYPT_RECV_DONE; +} + +static void qm_dfx_dump(struct hisi_qm *qm) +{ + const struct qm_dfx_registers *regs = qm_dfx_regs; + __maybe_unused unsigned int val = 0; + + if (qm->fun_type == HISI_QM_HW_VF) + return; + + while (regs->reg_name) { + val = io_read32(qm->io_base + regs->reg_offset); + EMSG("%s= 0x%" PRIx32, regs->reg_name, val); + regs++; + } +} + +enum hisi_drv_status hisi_qp_recv_sync(struct hisi_qp *qp, void *msg) +{ + enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR; + unsigned int cnt = 0; + + if (!qp) { + EMSG("qp is NULL"); + return HISI_QM_DRVCRYPT_EINVAL; + } + + while (true) { + ret = hisi_qp_recv(qp, msg); + if (ret == HISI_QM_DRVCRYPT_NO_ERR) { + cnt++; + if (cnt > HISI_QM_RECV_SYNC_TIMEOUT) { + EMSG("qm recv task timeout"); + qm_dfx_dump(qp->qm); + return HISI_QM_DRVCRYPT_ETMOUT; + } + } else { + if (ret == HISI_QM_DRVCRYPT_RECV_DONE) + return HISI_QM_DRVCRYPT_NO_ERR; + + EMSG("qm recv task error"); + qm_dfx_dump(qp->qm); + return ret; + } + } +} diff --git a/core/drivers/crypto/hisilicon/include/hisi_qm.h b/core/drivers/crypto/hisilicon/include/hisi_qm.h new file mode 100644 index 00000000000..2256681989c --- /dev/null +++ b/core/drivers/crypto/hisilicon/include/hisi_qm.h @@ -0,0 +1,260 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (c) 2022-2023, Huawei Technologies Co., Ltd + */ +#ifndef __HISI_QM_H__ +#define __HISI_QM_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HISI_QM_HW_V2 0x21 +#define HISI_QM_HW_V3 0x30 +#define HISI_QM_MAX_VFS_NUM 63 +#define HISI_QM_PF_Q_BASE 0 +#define HISI_QM_PF_Q_NUM 64 +#define HISI_QM_VF_Q_NUM 15 +#define HISI_QM_Q_DEPTH 8 +#define PHASE_DEFAULT_VAL 0x1 + +#define HISI_QM_ABNML_INT_MASK 0x100004 +#define HISI_QM_ABNML_INT_MASK_CFG 0x7fff +#define HISI_QM_ABNML_INT_SRC 0x100000 +#define HISI_QM_HPRE_NFE_INT_MASK 0x6fb7 +#define HISI_QM_SEC_NFE_INT_MASK 0x6ff7 +#define HISI_QM_INVALID_DB BIT(12) +#define HISI_QM_REVISON_ID_BASE 0x1000dc +#define 
HISI_QM_REVISON_ID_MASK GENMASK_32(7, 0) +#define POLL_PERIOD 10 +#define POLL_TIMEOUT 1000 +#define HISI_QM_RECV_SYNC_TIMEOUT 0xfffffff +#define HISI_QM_ALIGN128 128 +#define HISI_QM_ALIGN32 32 + +enum qm_fun_type { + HISI_QM_HW_PF, + HISI_QM_HW_VF, +}; + +enum qm_sq_type { + HISI_QM_CHANNEL_TYPE0 = 0, + HISI_QM_CHANNEL_TYPE1, + HISI_QM_CHANNEL_TYPE2, +}; + +struct qm_sqc { + uint16_t head; + uint16_t tail; + unsigned int base_l; + unsigned int base_h; + /* + * qes : 12 + * sqe : 4 + * rsv(stash_nid/stash_en) : 16 + */ + unsigned int dw3; + uint16_t rand_data; + uint16_t rsv0; + uint16_t pasid; + /* + * rsv : 5 + * head_sig : 1 + * tail_sig : 1 + * pasid_en : 1 + * rsv : 8 + */ + uint16_t w11; + uint16_t cq_num; + /* + * priority(Credit): 4 + * order(order/fc/close/rsv) : 4 + * type : 4 + * rsv : 4 + */ + uint16_t w13; + unsigned int rsv1; +}; + +struct qm_cqc { + uint16_t head; + uint16_t tail; + unsigned int base_l; + unsigned int base_h; + /* + * qes : 12 + * cqe_size : 4 + * rsv(stash_nid/stash_en) : 16 + */ + unsigned int dw3; + uint16_t rand_data; + uint16_t rsv0; + uint16_t pasid; + /* + * pasid_en : 1 + * rsv : 4 + * head_sig : 1 + * tail_sig : 1 + * rsv : 9 + */ + uint16_t w11; + /* + * phase : 1 + * c_flag : 1 + * stash_vld : 1 + */ + unsigned int dw6; + unsigned int rsv1; +}; + +struct qm_cqe { + unsigned int rsv0; + uint16_t cmd_id; + uint16_t rsv1; + uint16_t sq_head; + uint16_t sq_id; + uint16_t rsv2; + /* + * p : 1 + * status : 15 + */ + uint16_t w7; +}; + +struct hisi_qp { + struct hisi_qm *qm; + unsigned int qp_id; + uint8_t sq_type; + uint16_t sq_tail; + uint16_t cq_head; + bool cqc_phase; + + void *sqe; + struct qm_cqe *cqe; + paddr_t sqe_dma; + paddr_t cqe_dma; + + enum hisi_drv_status (*fill_sqe)(void *sqe, void *msg); + enum hisi_drv_status (*parse_sqe)(void *sqe, void *msg); +}; + +struct hisi_qm { + enum qm_fun_type fun_type; + vaddr_t io_base; + unsigned int io_size; + unsigned int vfs_num; + unsigned int version; + + struct qm_sqc *sqc; + struct qm_cqc *cqc; + paddr_t sqc_dma; + paddr_t cqc_dma; + unsigned int sqe_size; + unsigned int sqe_log2_size; + unsigned int qp_base; + unsigned int qp_num; + unsigned int qp_in_used; + unsigned int qp_idx; + struct hisi_qp *qp_array; + struct mutex qp_lock; /* protect the qp instance */ + + enum hisi_drv_status (*dev_status_check)(struct hisi_qm *qm); +}; + +enum hisi_drv_status { + HISI_QM_DRVCRYPT_NO_ERR = 0, + HISI_QM_DRVCRYPT_FAIL = 1, + HISI_QM_DRVCRYPT_EIO = 5, + HISI_QM_DRVCRYPT_EAGAIN = 11, + HISI_QM_DRVCRYPT_ENOMEM = 12, + HISI_QM_DRVCRYPT_EFAULT = 14, + HISI_QM_DRVCRYPT_EBUSY = 16, + HISI_QM_DRVCRYPT_ENODEV = 19, + HISI_QM_DRVCRYPT_EINVAL = 22, + HISI_QM_DRVCRYPT_ETMOUT = 110, + HISI_QM_DRVCRYPT_RECV_DONE = 175, + HISI_QM_DRVCRYPT_ENOPROC, + HISI_QM_DRVCRYPT_IN_EPARA, + HISI_QM_DRVCRYPT_VERIFY_ERR, + HISI_QM_DRVCRYPT_HW_EACCESS, +}; + +struct acc_device { + struct hisi_qm qm; + vaddr_t io_base; + unsigned int io_size; + unsigned int vfs_num; + unsigned int endian; + enum qm_fun_type fun_type; + SLIST_ENTRY(acc_device) link; +}; + +/** + * @Description: Get the version information of QM hardware + * @param qm: Handle of Queue Management module + */ +void hisi_qm_get_version(struct hisi_qm *qm); + +/** + * @Description: Init QM for Kunpeng drv + * @param qm: Handle of Queue Management module + * @return success: 0,fail: HISI_QM_DRVCRYPT_EBUSY/HISI_QM_DRVCRYPT_EINVAL + */ +enum hisi_drv_status hisi_qm_init(struct hisi_qm *qm); + +/** + * @Description:deinit QM for Kunpeng drv + * @param qm: Handle of Queue Management 
module
+ */
+void hisi_qm_uninit(struct hisi_qm *qm);
+
+/**
+ * @Description: Start QM for Kunpeng drv
+ * @param qm: Handle of Queue Management module
+ */
+enum hisi_drv_status hisi_qm_start(struct hisi_qm *qm);
+
+/**
+ * @Description: Config QM for Kunpeng drv
+ * @param qm: Handle of Queue Management module
+ */
+void hisi_qm_dev_init(struct hisi_qm *qm);
+
+/**
+ * @Description: Create Queue Pair, allocated to PF/VF for configuration
+ * and service use. Each QP includes one SQ and one CQ
+ * @param qm: Handle of Queue Management module
+ * @param sq_type: Accelerator specific algorithm type in sqc
+ * @return success: Handle of QP, fail: NULL
+ */
+struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, uint8_t sq_type);
+
+/**
+ * @Description: Release Queue Pair
+ * @param qp: Handle of Queue Pair
+ */
+void hisi_qm_release_qp(struct hisi_qp *qp);
+
+/**
+ * @Description: Send SQE (Submission Queue Element) to Kunpeng dev
+ * @param qp: Handle of Queue Pair
+ * @param msg: The message
+ * @return success: 0, fail: HISI_QM_DRVCRYPT_EINVAL
+ */
+enum hisi_drv_status hisi_qp_send(struct hisi_qp *qp, void *msg);
+
+/**
+ * @Description: Receive result from Kunpeng dev
+ * @param qp: Handle of Queue Pair
+ * @param msg: The message
+ * @return success: 0, fail: HISI_QM_DRVCRYPT_EINVAL/ETMOUT
+ */
+enum hisi_drv_status hisi_qp_recv_sync(struct hisi_qp *qp, void *msg);
+
+#endif
diff --git a/core/drivers/crypto/hisilicon/sub.mk b/core/drivers/crypto/hisilicon/sub.mk
new file mode 100644
index 00000000000..9a1747084ff
--- /dev/null
+++ b/core/drivers/crypto/hisilicon/sub.mk
@@ -0,0 +1,3 @@
+incdirs-y += include
+
+srcs-y += hisi_qm.c
diff --git a/core/drivers/crypto/sub.mk b/core/drivers/crypto/sub.mk
index 3c26eda79a2..71cb6bd6905 100644
--- a/core/drivers/crypto/sub.mk
+++ b/core/drivers/crypto/sub.mk
@@ -11,3 +11,5 @@ subdirs-$(CFG_STM32_CRYPTO_DRIVER) += stm32
 subdirs-$(CFG_ASPEED_CRYPTO_DRIVER) += aspeed
 subdirs-$(CFG_VERSAL_CRYPTO_DRIVER) += versal
+
+subdirs-$(CFG_HISILICON_CRYPTO_DRIVER) += hisilicon
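Usage sketch (illustrative, not part of the patch): the commit message and the
hisi_qm.h comments describe the SQ/CQ flow, and the snippet below shows how an
accelerator sub-driver could exercise that flow through the exported QM API.
Everything accelerator-specific here is a made-up placeholder: the io_base
passed in by the caller (assumed already mapped), struct dummy_msg, the BD
layout assumed in dummy_fill_sqe()/dummy_parse_sqe(), and the
dummy_dev_status_check() callback. Only the hisi_qm_*()/hisi_qp_*() calls and
the struct hisi_qm/hisi_qp fields come from this patch.

/* Illustrative sketch only; BD layout and message type are invented. */
#include <compiler.h>
#include <types_ext.h>

#include "hisi_qm.h"

#define DUMMY_SQE_SIZE		128	/* hypothetical BD size */
#define DUMMY_SQE_LOG2_SIZE	7

struct dummy_msg {
	paddr_t src;
	paddr_t dst;
	uint32_t len;
	uint32_t result;
};

/* Translate one request into one BD (field layout is hypothetical) */
static enum hisi_drv_status dummy_fill_sqe(void *sqe, void *msg)
{
	struct dummy_msg *m = msg;
	uint64_t *bd = sqe;

	bd[0] = m->src;
	bd[1] = m->dst;
	bd[2] = m->len;

	return HISI_QM_DRVCRYPT_NO_ERR;
}

/* Read the completion status back from the written-back BD */
static enum hisi_drv_status dummy_parse_sqe(void *sqe, void *msg)
{
	struct dummy_msg *m = msg;
	uint64_t *bd = sqe;

	m->result = (uint32_t)bd[3];

	return HISI_QM_DRVCRYPT_NO_ERR;
}

/* Called by the QM before each send/receive; report the device as usable */
static enum hisi_drv_status dummy_dev_status_check(struct hisi_qm *qm __unused)
{
	return HISI_QM_DRVCRYPT_NO_ERR;
}

/* io_base must already be mapped by the caller's platform code */
static enum hisi_drv_status dummy_run_one_task(vaddr_t io_base,
					       struct dummy_msg *msg)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm qm = {
		.fun_type = HISI_QM_HW_PF,
		.io_base = io_base,
		.qp_base = HISI_QM_PF_Q_BASE,
		.qp_num = HISI_QM_PF_Q_NUM,
		.sqe_size = DUMMY_SQE_SIZE,
		.sqe_log2_size = DUMMY_SQE_LOG2_SIZE,
		.dev_status_check = dummy_dev_status_check,
	};
	struct hisi_qp *qp = NULL;

	hisi_qm_get_version(&qm);

	ret = hisi_qm_init(&qm);	/* allocate SQC/CQC and QP memory */
	if (ret)
		return ret;

	hisi_qm_dev_init(&qm);		/* PF-only device configuration */

	ret = hisi_qm_start(&qm);	/* program VFT and SQC/CQC base tables */
	if (ret)
		goto out;

	qp = hisi_qm_create_qp(&qm, HISI_QM_CHANNEL_TYPE0);
	if (!qp) {
		ret = HISI_QM_DRVCRYPT_EBUSY;
		goto out;
	}
	qp->fill_sqe = dummy_fill_sqe;
	qp->parse_sqe = dummy_parse_sqe;

	ret = hisi_qp_send(qp, msg);	/* fill one SQE, ring the SQ doorbell */
	if (!ret)
		ret = hisi_qp_recv_sync(qp, msg);	/* poll the CQ */

	hisi_qm_release_qp(qp);
out:
	hisi_qm_uninit(&qm);
	return ret;
}

The fill_sqe()/parse_sqe() split mirrors how the QM core stays
algorithm-agnostic: the per-accelerator driver owns the BD contents, while the
QM only manages queue memory, doorbells and mailbox traffic.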