diff --git a/core/arch/arm/include/arm64.h b/core/arch/arm/include/arm64.h
index 423adfdb1d2..d2c4f0e74f0 100644
--- a/core/arch/arm/include/arm64.h
+++ b/core/arch/arm/include/arm64.h
@@ -288,6 +288,11 @@ static inline __noprof void dsb_ishst(void)
 	asm volatile ("dsb ishst" : : : "memory");
 }
 
+static inline __noprof void dsb_osh(void)
+{
+	asm volatile ("dsb osh" : : : "memory");
+}
+
 static inline __noprof void sev(void)
 {
 	asm volatile ("sev" : : : "memory");
diff --git a/core/arch/arm/plat-d06/main.c b/core/arch/arm/plat-d06/main.c
index 00853425726..73da3219da3 100644
--- a/core/arch/arm/plat-d06/main.c
+++ b/core/arch/arm/plat-d06/main.c
@@ -4,11 +4,11 @@
  * Copyright (c) 2022, Huawei Technologies Co., Ltd
  */
 #include
+#include
 #include
-#include
 #include
-#include
 #include
+#include
 
 static struct lpc_uart_data console_data __nex_bss;
 static struct gic_data gic_data __nex_bss;
diff --git a/core/drivers/crypto/hisilicon/hisi_qm.c b/core/drivers/crypto/hisilicon/hisi_qm.c
index 20a4b33bba7..b14a04cfb84 100644
--- a/core/drivers/crypto/hisilicon/hisi_qm.c
+++ b/core/drivers/crypto/hisilicon/hisi_qm.c
@@ -5,6 +5,7 @@
  */
 #include "hisi_qm.h"
 
+#define QM_FVT_CFG_RDY_BIT 0x1
 /* doorbell */
 #define QM_DOORBELL_SQ_CQ_BASE 0x1000
 #define QM_DB_CMD_SHIFT 12
@@ -19,6 +20,7 @@
 #define QM_MAILBOX_DATA_ADDR_L 0x304
 #define QM_MAILBOX_DATA_ADDR_H 0x308
 #define QM_MB_BUSY_SHIFT 13
+#define QM_MB_BUSY_BIT BIT32(QM_MB_BUSY_SHIFT)
 #define QM_MB_OP_SHIFT 14
 #define QM_MB_OP_WR 0
 #define QM_MB_OP_RD 1
@@ -44,7 +46,9 @@
 #define QM_MEM_START_INIT 0x100040
 #define QM_MEM_INIT_DONE 0x100044
 #define QM_VF_AEQ_INT_MASK 0x4
+#define QM_VF_AEQ_INT_MASK_EN 0x1
 #define QM_VF_EQ_INT_MASK 0xc
+#define QM_VF_EQ_INT_MASK_EN 0x1
 #define QM_ARUSER_M_CFG_1 0x100088
 #define QM_ARUSER_M_CFG_ENABLE 0x100090
 #define QM_AWUSER_M_CFG_1 0x100098
@@ -66,7 +70,7 @@
 #define QM_CQE_SIZE 4
 #define QM_CQ_CQE_SIZE_SHIFT 12
 /* CQE */
-#define QM_CQE_PHASE(cqe) (((cqe)->w7) & 0x1)
+#define QM_CQE_PHASE(cqe) (((cqe)->w7) & QM_FVT_CFG_RDY_BIT)
 
 enum qm_mailbox_common_cmd {
 	QM_MB_CMD_SQC = 0x0,
@@ -101,104 +105,102 @@ struct qm_dfx_registers {
 	uint32_t reg_offset;
 };
 
-static struct qm_dfx_registers qm_dfx_regs[] = {
-	{"QM_ECC_1BIT_CNT ", 0x104000},
-	{"QM_ECC_MBIT_CNT ", 0x104008},
-	{"QM_DFX_MB_CNT ", 0x104018},
-	{"QM_DFX_DB_CNT ", 0x104028},
-	{"QM_DFX_SQE_CNT ", 0x104038},
-	{"QM_DFX_CQE_CNT ", 0x104048},
-	{"QM_DFX_SEND_SQE_TO_ACC_CNT", 0x104050},
-	{"QM_DFX_WB_SQE_FROM_ACC_CNT", 0x104058},
-	{"QM_DFX_ACC_FINISH_CNT ", 0x104060},
-	{"QM_DFX_CQE_ERR_CNT ", 0x1040b4},
-	{ NULL, 0}
+static const struct qm_dfx_registers qm_dfx_regs[] = {
+	{.reg_name = "QM_ECC_1BIT_CNT ", .reg_offset = 0x104000},
+	{.reg_name = "QM_ECC_MBIT_CNT ", .reg_offset = 0x104008},
+	{.reg_name = "QM_DFX_MB_CNT ", .reg_offset = 0x104018},
+	{.reg_name = "QM_DFX_DB_CNT ", .reg_offset = 0x104028},
+	{.reg_name = "QM_DFX_SQE_CNT ", .reg_offset = 0x104038},
+	{.reg_name = "QM_DFX_CQE_CNT ", .reg_offset = 0x104048},
+	{.reg_name = "QM_DFX_SEND_SQE_TO_ACC_CNT", .reg_offset = 0x104050},
+	{.reg_name = "QM_DFX_WB_SQE_FROM_ACC_CNT", .reg_offset = 0x104058},
+	{.reg_name = "QM_DFX_ACC_FINISH_CNT ", .reg_offset = 0x104060},
+	{.reg_name = "QM_DFX_CQE_ERR_CNT ", .reg_offset = 0x1040b4},
+	{.reg_name = NULL, 0}
 };
 
 void hisi_qm_get_version(struct hisi_qm *qm)
 {
-	uint32_t val = 0;
-
-	val = io_read32(qm->io_base + QM_REVISON_ID_BASE);
-	qm->version = val & QM_REVISON_ID_MASK;
+	qm->version = io_read32(qm->io_base + HISI_QM_REVISON_ID_BASE) &
+		      HISI_QM_REVISON_ID_MASK;
 }
 
 static void
qm_db(struct hisi_qm *qm, uint16_t qn, uint8_t cmd, uint16_t index, uint8_t priority) { - uint16_t rand_data = QM_DB_RAND_DATA; uint64_t doorbell = 0; - doorbell = (qn | ((uint64_t)cmd << QM_DB_CMD_SHIFT) | - ((uint64_t)rand_data << QM_DB_RAND_DATA_SHIFT) | - ((uint64_t)index << QM_DB_INDEX_SHIFT) | - ((uint64_t)priority << QM_DB_PRIORITY_SHIFT)); + doorbell = qn | SHIFT_U64(cmd, QM_DB_CMD_SHIFT) | + SHIFT_U64(QM_DB_RAND_DATA, QM_DB_RAND_DATA_SHIFT) | + SHIFT_U64(index, QM_DB_INDEX_SHIFT) | + SHIFT_U64(priority, QM_DB_PRIORITY_SHIFT); io_write64(qm->io_base + QM_DOORBELL_SQ_CQ_BASE, doorbell); } -static int32_t qm_wait_mb_ready(struct hisi_qm *qm) +static TEE_Result qm_wait_mb_ready(struct hisi_qm *qm) { uint32_t val = 0; /* return 0 mailbox ready, -ETIMEDOUT hardware timeout */ - return readl_relaxed_poll_timeout(qm->io_base + QM_MAILBOX_BASE, - val, !((val >> QM_MB_BUSY_SHIFT) & - 0x1), POLL_PERIOD, POLL_TIMEOUT); + return readl_relaxed_poll_timeout(qm->io_base + QM_MAILBOX_BASE, val, + !(val & QM_MB_BUSY_BIT), POLL_PERIOD, + POLL_TIMEOUT); } static void qm_mb_write(struct hisi_qm *qm, void *src) { - uintptr_t dst = qm->io_base + QM_MAILBOX_BASE; + vaddr_t dst = qm->io_base + QM_MAILBOX_BASE; unsigned long tmp0 = 0; unsigned long tmp1 = 0; /* 128bits should be written to hardware at one time */ asm volatile ("ldp %0, %1, %3\n" "stp %0, %1, %2\n" - "dsb sy\n" : "=&r"(tmp0), "=&r"(tmp1), "+Q"(*((char *)dst)) : "Q"(*((char *)src)) : "memory"); + dsb(); } -static int32_t qm_mb(struct hisi_qm *qm, uint8_t cmd, uintptr_t dma_addr, - uint16_t qn, uint8_t op) +static TEE_Result qm_mb(struct hisi_qm *qm, uint8_t cmd, vaddr_t dma_addr, + uint16_t qn, uint8_t op) { - struct qm_mailbox mb = {0}; + struct qm_mailbox mb = { }; - mb.w0 = (cmd | (op ? 0x1 << QM_MB_OP_SHIFT : 0) | - (0x1 << QM_MB_BUSY_SHIFT)); + mb.w0 = cmd | SHIFT_U32(op, QM_MB_OP_SHIFT) | + BIT32(QM_MB_BUSY_SHIFT); mb.queue = qn; - mb.base_l = lower_32_bits(dma_addr); - mb.base_h = upper_32_bits(dma_addr); + reg_pair_from_64(dma_addr, &mb.base_h, &mb.base_l); mb.token = 0; if (qm_wait_mb_ready(qm)) { EMSG("QM mailbox is busy"); - return -DRVCRYPT_EBUSY; + return HISI_QM_DRVCRYPT_EBUSY; } qm_mb_write(qm, &mb); if (qm_wait_mb_ready(qm)) { EMSG("QM mailbox operation timeout"); - return -DRVCRYPT_EBUSY; + return HISI_QM_DRVCRYPT_EBUSY; } - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; } static void qm_cfg_vft_data(struct hisi_qm *qm, uint8_t vft_type, uint32_t base, uint32_t number) { + uint32_t data_h = 0; + uint32_t data_l = 0; uint64_t data = 0; switch (vft_type) { case QM_SQC_VFT: - data = ((uint64_t)base << QM_SQC_VFT_START_SQN_SHIFT | + data = (SHIFT_U64(base, QM_SQC_VFT_START_SQN_SHIFT) | QM_SQC_VFT_VALID | - (uint64_t)(number - 1) << QM_SQC_VFT_SQ_NUM_SHIFT); + SHIFT_U64((number - 1), QM_SQC_VFT_SQ_NUM_SHIFT)); break; case QM_CQC_VFT: data = QM_CQC_VFT_VALID; @@ -208,18 +210,21 @@ static void qm_cfg_vft_data(struct hisi_qm *qm, uint8_t vft_type, break; } - io_write32(qm->io_base + QM_VFT_CFG_DATA_L, lower_32_bits(data)); - io_write32(qm->io_base + QM_VFT_CFG_DATA_H, upper_32_bits(data)); + reg_pair_from_64(data, &data_h, &data_l); + io_write32(qm->io_base + QM_VFT_CFG_DATA_L, data_l); + io_write32(qm->io_base + QM_VFT_CFG_DATA_H, data_h); } -static int32_t qm_set_vft_common(struct hisi_qm *qm, uint8_t vft_type, - uint32_t function, uint32_t base, uint32_t num) +static TEE_Result qm_set_vft_common(struct hisi_qm *qm, uint8_t vft_type, + uint32_t function, uint32_t base, + uint32_t num) { uint32_t val = 0; - int32_t ret = 0; 
+ uint32_t ret = 0; ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, - val & 0x1, POLL_PERIOD, POLL_TIMEOUT); + val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD, + POLL_TIMEOUT); if (ret) { EMSG("QM VFT is not ready"); return ret; @@ -230,38 +235,39 @@ static int32_t qm_set_vft_common(struct hisi_qm *qm, uint8_t vft_type, io_write32(qm->io_base + QM_VFT_CFG_ADDRESS, function); qm_cfg_vft_data(qm, vft_type, base, num); io_write32(qm->io_base + QM_VFT_CFG_RDY, 0x0); - io_write32(qm->io_base + QM_VFT_CFG_OP_ENABLE, 0x1); + io_write32(qm->io_base + QM_VFT_CFG_OP_ENABLE, QM_FVT_CFG_RDY_BIT); return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, - val & 0x1, POLL_PERIOD, POLL_TIMEOUT); + val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD, + POLL_TIMEOUT); } -static int32_t qm_set_xqc_vft(struct hisi_qm *qm, uint32_t function, - uint32_t base, uint32_t num) +static TEE_Result qm_set_xqc_vft(struct hisi_qm *qm, uint32_t function, + uint32_t base, uint32_t num) { - int32_t ret = 0; + uint32_t ret = 0; int32_t i = 0; if (!num) { EMSG("Invalid sq num"); - return -DRVCRYPT_EINVAL; + return HISI_QM_DRVCRYPT_EINVAL; } for (i = QM_SQC_VFT; i <= QM_CQC_VFT; i++) { ret = qm_set_vft_common(qm, i, function, base, num); if (ret) { - EMSG("QM set type%d fail!\n", i); + EMSG("QM set type%d fail", i); return ret; } } - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; } -static int32_t qm_get_vft(struct hisi_qm *qm, uint32_t *base, uint32_t *num) +static TEE_Result qm_get_vft(struct hisi_qm *qm, uint32_t *base, uint32_t *num) { uint64_t sqc_vft = 0; - int32_t ret = 0; + uint32_t ret = 0; ret = qm_mb(qm, QM_MB_CMD_SQC_VFT, 0, 0, QM_MB_OP_RD); if (ret) @@ -271,7 +277,7 @@ static int32_t qm_get_vft(struct hisi_qm *qm, uint32_t *base, uint32_t *num) *base = (sqc_vft >> QM_SQC_VFT_START_SQN_SHIFT) & QM_SQC_VFT_BASE_MASK; *num = ((sqc_vft >> QM_SQC_VFT_SQ_NUM_SHIFT) & QM_SQC_VFT_NUM_MASK) + 1; - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; } static void qp_memory_uninit(struct hisi_qm *qm, uint32_t id) @@ -282,24 +288,24 @@ static void qp_memory_uninit(struct hisi_qm *qm, uint32_t id) free(qp->cqe); } -static int32_t qp_memory_init(struct hisi_qm *qm, uint32_t id) +static TEE_Result qp_memory_init(struct hisi_qm *qm, uint32_t id) { - size_t sq_size = qm->sqe_size * QM_Q_DEPTH; - size_t cq_size = sizeof(struct qm_cqe) * QM_Q_DEPTH; + size_t sq_size = qm->sqe_size * HISI_QM_Q_DEPTH; + size_t cq_size = sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH; struct hisi_qp *qp = &qm->qp_array[id]; - int32_t ret = 0; + uint32_t ret = 0; - qp->sqe = memalign(QM_ALIGN128, sq_size); + qp->sqe = memalign(HISI_QM_ALIGN128, sq_size); if (!qp->sqe) { - EMSG("Fail to malloc sq[%u]!\n", id); - return -DRVCRYPT_ENOMEM; + EMSG("Fail to malloc sq[%"PRIu32"]", id); + return HISI_QM_DRVCRYPT_ENOMEM; } qp->sqe_dma = virt_to_phys(qp->sqe); assert(qp->sqe_dma); - qp->cqe = (struct qm_cqe *)memalign(QM_ALIGN32, cq_size); + qp->cqe = memalign(HISI_QM_ALIGN32, cq_size); if (!qp->cqe) { - EMSG("Fail to malloc cq[%u]!\n", id); - ret = -DRVCRYPT_ENOMEM; + EMSG("Fail to malloc cq[%"PRIu32"]", id); + ret = HISI_QM_DRVCRYPT_ENOMEM; goto free_sqe; } qp->cqe_dma = virt_to_phys(qp->cqe); @@ -307,7 +313,7 @@ static int32_t qp_memory_init(struct hisi_qm *qm, uint32_t id) qp->qp_id = id; qp->qm = qm; - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; free_sqe: free(qp->sqe); @@ -326,29 +332,30 @@ static void qm_memory_uninit(struct hisi_qm *qm) free(qm->cqc); } -static int32_t qm_memory_init(struct hisi_qm *qm) +static TEE_Result 
qm_memory_init(struct hisi_qm *qm) { size_t sqc_size = 0; size_t cqc_size = 0; size_t qp_size = 0; - int32_t j, ret; + int32_t j = 0; + uint32_t ret = 0; uint32_t i; sqc_size = sizeof(struct qm_sqc) * qm->qp_num; cqc_size = sizeof(struct qm_cqc) * qm->qp_num; qp_size = sizeof(struct hisi_qp) * qm->qp_num; - qm->sqc = (struct qm_sqc *)memalign(QM_ALIGN32, sqc_size); + qm->sqc = memalign(HISI_QM_ALIGN32, sqc_size); if (!qm->sqc) { EMSG("Fail to malloc sqc"); - return -DRVCRYPT_ENOMEM; + return HISI_QM_DRVCRYPT_ENOMEM; } qm->sqc_dma = virt_to_phys(qm->sqc); assert(qm->sqc_dma); - qm->cqc = (struct qm_cqc *)memalign(QM_ALIGN32, cqc_size); + qm->cqc = memalign(HISI_QM_ALIGN32, cqc_size); if (!qm->cqc) { EMSG("Fail to malloc cqc"); - ret = -DRVCRYPT_ENOMEM; + ret = HISI_QM_DRVCRYPT_ENOMEM; goto free_sqc; } qm->cqc_dma = virt_to_phys(qm->cqc); @@ -357,19 +364,19 @@ static int32_t qm_memory_init(struct hisi_qm *qm) qm->qp_array = (struct hisi_qp *)malloc(qp_size); if (!qm->qp_array) { EMSG("Fail to malloc qp_array"); - ret = -DRVCRYPT_ENOMEM; + ret = HISI_QM_DRVCRYPT_ENOMEM; goto free_cqc; } for (i = 0; i < qm->qp_num; i++) { ret = qp_memory_init(qm, i); if (ret) { - ret = -DRVCRYPT_ENOMEM; + ret = HISI_QM_DRVCRYPT_ENOMEM; goto free_qp_mem; } } - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; free_qp_mem: for (j = (int32_t)i - 1; j >= 0; j--) @@ -382,11 +389,11 @@ static int32_t qm_memory_init(struct hisi_qm *qm) return ret; } -int32_t hisi_qm_init(struct hisi_qm *qm) +TEE_Result hisi_qm_init(struct hisi_qm *qm) { - int32_t ret = 0; + uint32_t ret = 0; - if (qm->fun_type == QM_HW_VF) { + if (qm->fun_type == HISI_QM_HW_VF) { ret = qm_get_vft(qm, &qm->qp_base, &qm->qp_num); if (ret) { EMSG("Fail to get function vft config"); @@ -396,7 +403,7 @@ int32_t hisi_qm_init(struct hisi_qm *qm) if (qm->qp_num == 0 || qm->sqe_size == 0) { EMSG("Invalid qm parameters"); - return -DRVCRYPT_EINVAL; + return HISI_QM_DRVCRYPT_EINVAL; } ret = qm_memory_init(qm); @@ -407,17 +414,18 @@ int32_t hisi_qm_init(struct hisi_qm *qm) qm->qp_idx = 0; mutex_init(&qm->qp_lock); - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; } static void qm_cache_writeback(struct hisi_qm *qm) { uint32_t val = 0; - io_write32(qm->io_base + QM_CACHE_WB_START, 0x1); + io_write32(qm->io_base + QM_CACHE_WB_START, QM_FVT_CFG_RDY_BIT); if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, val, - val & 0x1, POLL_PERIOD, POLL_TIMEOUT)) + val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD, + POLL_TIMEOUT)) EMSG("QM writeback sqc cache fail"); } @@ -428,35 +436,35 @@ void hisi_qm_uninit(struct hisi_qm *qm) mutex_destroy(&qm->qp_lock); } -static int32_t qm_hw_mem_reset(struct hisi_qm *qm) +static TEE_Result qm_hw_mem_reset(struct hisi_qm *qm) { uint32_t val = 0; - io_write32(qm->io_base + QM_MEM_START_INIT, 0x1); + io_write32(qm->io_base + QM_MEM_START_INIT, QM_FVT_CFG_RDY_BIT); return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, - val & 0x1, POLL_PERIOD, + val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD, POLL_TIMEOUT); } -static int32_t qm_func_vft_cfg(struct hisi_qm *qm) +static TEE_Result qm_func_vft_cfg(struct hisi_qm *qm) { uint32_t q_base = qm->qp_num; uint32_t act_q_num = 0; uint32_t i = 0; uint32_t j = 0; - int32_t ret = 0; + uint32_t ret = 0; if (qm->vfs_num == 0) - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; - if (qm->vfs_num > QM_MAX_VFS_NUM) { + if (qm->vfs_num > HISI_QM_MAX_VFS_NUM) { EMSG("Invalid QM vfs_num"); - return -DRVCRYPT_EINVAL; + return HISI_QM_DRVCRYPT_EINVAL; } for (i = 1; i <= qm->vfs_num; i++) { - 
act_q_num = QM_VF_Q_NUM; + act_q_num = HISI_QM_VF_Q_NUM; ret = qm_set_xqc_vft(qm, i, q_base, act_q_num); if (ret) { for (j = 1; j < i; j++) @@ -466,14 +474,14 @@ static int32_t qm_func_vft_cfg(struct hisi_qm *qm) q_base += act_q_num; } - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; } -int32_t hisi_qm_start(struct hisi_qm *qm) +TEE_Result hisi_qm_start(struct hisi_qm *qm) { - int32_t ret = 0; + uint32_t ret = 0; - if (qm->fun_type == QM_HW_PF) { + if (qm->fun_type == HISI_QM_HW_PF) { ret = qm_hw_mem_reset(qm); if (ret) { EMSG("Fail to reset qm hardware mem"); @@ -506,15 +514,15 @@ int32_t hisi_qm_start(struct hisi_qm *qm) } /* security mode does not support msi */ - io_write32(qm->io_base + QM_VF_AEQ_INT_MASK, 0x1); - io_write32(qm->io_base + QM_VF_EQ_INT_MASK, 0x1); + io_write32(qm->io_base + QM_VF_AEQ_INT_MASK, QM_VF_AEQ_INT_MASK_EN); + io_write32(qm->io_base + QM_VF_EQ_INT_MASK, QM_VF_EQ_INT_MASK_EN); - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; } void hisi_qm_dev_init(struct hisi_qm *qm) { - if (qm->fun_type == QM_HW_VF) + if (qm->fun_type == HISI_QM_HW_VF) return; /* qm user domain */ @@ -525,38 +533,39 @@ void hisi_qm_dev_init(struct hisi_qm *qm) /* qm cache */ io_write32(qm->io_base + QM_AXI_M_CFG, AXI_M_CFG); - if (qm->version == QM_HW_V2) { + if (qm->version == HISI_QM_HW_V2) { /* disable FLR triggered by BME(bus master enable) */ io_write32(qm->io_base + QM_PEH_AXUSER_CFG, PEH_AXUSER_CFG); /* set sec sqc and cqc cache wb threshold 4 */ io_write32(qm->io_base + QM_CACHE_CTL, QM_CACHE_CFG); } /* disable qm ras */ - io_write32(qm->io_base + QM_ABNML_INT_MASK, QM_ABNML_INT_MASK_CFG); + io_write32(qm->io_base + HISI_QM_ABNML_INT_MASK, + HISI_QM_ABNML_INT_MASK_CFG); } -static int32_t qm_sqc_cfg(struct hisi_qp *qp) +static TEE_Result qm_sqc_cfg(struct hisi_qp *qp) { struct hisi_qm *qm = qp->qm; struct qm_sqc *sqc = NULL; paddr_t sqc_dma = 0; int ret = 0; - sqc = (struct qm_sqc *)memalign(QM_ALIGN32, sizeof(struct qm_sqc)); + sqc = memalign(HISI_QM_ALIGN32, sizeof(struct qm_sqc)); if (!sqc) - return -DRVCRYPT_ENOMEM; + return HISI_QM_DRVCRYPT_ENOMEM; sqc_dma = virt_to_phys(sqc); assert(sqc_dma); memset(sqc, 0, sizeof(struct qm_sqc)); - sqc->base_l = lower_32_bits(qp->sqe_dma); - sqc->base_h = upper_32_bits(qp->sqe_dma); - sqc->dw3 = (QM_Q_DEPTH - 1) | qm->sqe_log2_size << QM_SQ_SQE_SIZE_SHIFT; + reg_pair_from_64(qp->sqe_dma, &sqc->base_h, &sqc->base_l); + sqc->dw3 = (HISI_QM_Q_DEPTH - 1) | + SHIFT_U32(qm->sqe_log2_size, QM_SQ_SQE_SIZE_SHIFT); sqc->rand_data = QM_DB_RAND_DATA; sqc->cq_num = qp->qp_id; - sqc->w13 = 0x1 << QM_SQ_ORDER_SHIFT | - (uint16_t)qp->sq_type << QM_SQ_TYPE_SHIFT; + sqc->w13 = BIT32(QM_SQ_ORDER_SHIFT) | + SHIFT_U32(qp->sq_type, QM_SQ_TYPE_SHIFT); ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp->qp_id, QM_MB_OP_WR); free(sqc); @@ -564,26 +573,26 @@ static int32_t qm_sqc_cfg(struct hisi_qp *qp) return ret; } -static int32_t qm_cqc_cfg(struct hisi_qp *qp) +static TEE_Result qm_cqc_cfg(struct hisi_qp *qp) { struct hisi_qm *qm = qp->qm; struct qm_cqc *cqc = NULL; paddr_t cqc_dma = 0; int ret = 0; - cqc = (struct qm_cqc *)memalign(QM_ALIGN32, sizeof(struct qm_cqc)); + cqc = memalign(HISI_QM_ALIGN32, sizeof(struct qm_cqc)); if (!cqc) - return -DRVCRYPT_ENOMEM; + return HISI_QM_DRVCRYPT_ENOMEM; cqc_dma = virt_to_phys(cqc); assert(cqc_dma); memset(cqc, 0, sizeof(struct qm_cqc)); - cqc->base_l = lower_32_bits(qp->cqe_dma); - cqc->base_h = upper_32_bits(qp->cqe_dma); - cqc->dw3 = (QM_Q_DEPTH - 1) | QM_CQE_SIZE << QM_CQ_CQE_SIZE_SHIFT; + 
reg_pair_from_64(qp->cqe_dma, &cqc->base_h, &cqc->base_l); + cqc->dw3 = (HISI_QM_Q_DEPTH - 1) | + SHIFT_U32(QM_CQE_SIZE, QM_CQ_CQE_SIZE_SHIFT); cqc->rand_data = QM_DB_RAND_DATA; - cqc->dw6 = 0x1; + cqc->dw6 = PHASE_DEFAULT_VAL; ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp->qp_id, QM_MB_OP_WR); free(cqc); @@ -597,7 +606,7 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, uint8_t sq_type) mutex_lock(&qm->qp_lock); if (qm->qp_in_used == qm->qp_num) { - EMSG("All %u queues of QM are busy!\n", qm->qp_num); + EMSG("All %"PRIu32" queues of QM are busy", qm->qp_num); goto err_proc; } @@ -607,19 +616,19 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, uint8_t sq_type) qm->qp_idx++; qp = &qm->qp_array[qm->qp_idx]; - memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH); + memset(qp->cqe, 0, sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH); qp->sq_type = sq_type; qp->sq_tail = 0; qp->cq_head = 0; qp->cqc_phase = true; if (qm_sqc_cfg(qp)) { - EMSG("Fail to set qp[%u] sqc!\n", qp->qp_id); + EMSG("Fail to set qp[%"PRIu32"] sqc", qp->qp_id); goto err_proc; } if (qm_cqc_cfg(qp)) { - EMSG("Fail to set qp[%u] cqc!\n", qp->qp_id); + EMSG("Fail to set qp[%"PRIu32"] cqc", qp->qp_id); goto err_proc; } @@ -649,7 +658,7 @@ void hisi_qm_release_qp(struct hisi_qp *qp) static void qm_sq_tail_update(struct hisi_qp *qp) { - if (qp->sq_tail == QM_Q_DEPTH - 1) + if (qp->sq_tail == HISI_QM_Q_DEPTH - 1) qp->sq_tail = 0; else qp->sq_tail++; @@ -659,16 +668,15 @@ static void qm_sq_tail_update(struct hisi_qp *qp) * One task thread will just bind to one hardware queue, and * hardware does not support msi. So we have no lock here. */ -int32_t hisi_qp_send(struct hisi_qp *qp, void *msg) +TEE_Result hisi_qp_send(struct hisi_qp *qp, void *msg) { struct hisi_qm *qm = NULL; - uintptr_t tmp = 0; - int32_t ret = 0; + uint32_t ret = 0; void *sqe = NULL; if (!qp) { EMSG("qp is NULL"); - return -DRVCRYPT_EINVAL; + return HISI_QM_DRVCRYPT_EINVAL; } qm = qp->qm; @@ -676,8 +684,7 @@ int32_t hisi_qp_send(struct hisi_qp *qp, void *msg) if (ret) return ret; - tmp = (uintptr_t)qp->sqe + qm->sqe_size * qp->sq_tail; - sqe = (void *)tmp; + sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * qp->sq_tail); memset(sqe, 0, qm->sqe_size); ret = qp->fill_sqe(sqe, msg); @@ -688,15 +695,15 @@ int32_t hisi_qp_send(struct hisi_qp *qp, void *msg) qm_sq_tail_update(qp); - __asm__ volatile("dsb sy"); + dsb(); qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_SQ, qp->sq_tail, 0); - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; } static void qm_cq_head_update(struct hisi_qp *qp) { - if (qp->cq_head == QM_Q_DEPTH - 1) { + if (qp->cq_head == HISI_QM_Q_DEPTH - 1) { qp->cqc_phase = !qp->cqc_phase; qp->cq_head = 0; } else { @@ -704,12 +711,12 @@ static void qm_cq_head_update(struct hisi_qp *qp) } } -static int32_t hisi_qp_recv(struct hisi_qp *qp, void *msg) +#define HISI_QM_RECV_DONE 1 +static TEE_Result hisi_qp_recv(struct hisi_qp *qp, void *msg) { struct hisi_qm *qm = qp->qm; struct qm_cqe *cqe = NULL; - uintptr_t tmp = 0; - int32_t ret = 0; + uint32_t ret = 0; void *sqe = NULL; ret = qm->dev_status_check(qm); @@ -718,9 +725,8 @@ static int32_t hisi_qp_recv(struct hisi_qp *qp, void *msg) cqe = qp->cqe + qp->cq_head; if (QM_CQE_PHASE(cqe) == qp->cqc_phase) { - __asm__ volatile("dmb osh"); - tmp = (uintptr_t)qp->sqe + qm->sqe_size * cqe->sq_head; - sqe = (void *)tmp; + dsb_osh(); + sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * cqe->sq_head); ret = qp->parse_sqe(sqe, msg); qm_cq_head_update(qp); qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->cq_head, 0); @@ -729,53 
+735,52 @@ static int32_t hisi_qp_recv(struct hisi_qp *qp, void *msg) return ret; } } else { - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; } - return 1; + return HISI_QM_RECV_DONE; } static void qm_dfx_dump(struct hisi_qm *qm) { - struct qm_dfx_registers *regs = NULL; + const struct qm_dfx_registers *regs = qm_dfx_regs; uint32_t val = 0; - if (qm->fun_type == QM_HW_VF) + if (qm->fun_type == HISI_QM_HW_VF) return; - regs = qm_dfx_regs; while (regs->reg_name) { val = io_read32(qm->io_base + regs->reg_offset); - EMSG("%s= 0x%x\n", regs->reg_name, val); + EMSG("%s= 0x%" PRIx32, regs->reg_name, val); regs++; } } -int32_t hisi_qp_recv_sync(struct hisi_qp *qp, void *msg) +TEE_Result hisi_qp_recv_sync(struct hisi_qp *qp, void *msg) { uint32_t cnt = 0; - int32_t ret = 0; + uint32_t ret = 0; if (!qp) { EMSG("qp is NULL"); - return -DRVCRYPT_EINVAL; + return HISI_QM_DRVCRYPT_EINVAL; } while (true) { ret = hisi_qp_recv(qp, msg); if (ret == 0) { - if (++cnt > QM_RECV_SYNC_TIMEOUT) { + if (++cnt > HISI_QM_RECV_SYNC_TIMEOUT) { EMSG("qm recv task timeout"); qm_dfx_dump(qp->qm); - ret = -DRVCRYPT_ETMOUT; - break; + return HISI_QM_DRVCRYPT_ETMOUT; } - } else if (ret < 0) { + } else { + if (ret == HISI_QM_RECV_DONE) + return HISI_QM_DRVCRYPT_NO_ERR; + EMSG("qm recv task error"); qm_dfx_dump(qp->qm); break; - } else if (ret > 0) { - return TEE_SUCCESS; } } diff --git a/core/drivers/crypto/hisilicon/include/hisi_cipher.h b/core/drivers/crypto/hisilicon/include/hisi_cipher.h index 1af705b7776..db941f1863f 100644 --- a/core/drivers/crypto/hisilicon/include/hisi_cipher.h +++ b/core/drivers/crypto/hisilicon/include/hisi_cipher.h @@ -24,7 +24,7 @@ enum C_MODE { C_MODE_CCM = 0x5, C_MODE_GCM = 0x6, C_MODE_XTS = 0x7, - C_MODE_CBC_CS = 0x9, + C_MODE_CBC_CS = 0x9, }; #define DES_KEY_SIZE 8 @@ -46,7 +46,11 @@ enum C_MODE { static inline uint32_t multiple_round(uint32_t x, uint32_t y) { - return (((x) + (y) - 1) / (y)); + uint32_t res = 0; + + assert(!ADD_OVERFLOW(x, y - 1, &res)); + + return res; } struct sec_cipher_ctx { diff --git a/core/drivers/crypto/hisilicon/include/hisi_qm.h b/core/drivers/crypto/hisilicon/include/hisi_qm.h index d12921e0229..b646bae4e2e 100644 --- a/core/drivers/crypto/hisilicon/include/hisi_qm.h +++ b/core/drivers/crypto/hisilicon/include/hisi_qm.h @@ -2,8 +2,8 @@ /* * Copyright (c) 2022, Huawei Technologies Co., Ltd */ -#ifndef __QM_H__ -#define __QM_H__ +#ifndef __HISI_QM_H__ +#define __HISI_QM_H__ #include #include @@ -15,39 +15,38 @@ #include #include -#define QM_HW_V2 0x21 -#define QM_HW_V3 0x30 -#define QM_MAX_VFS_NUM 63 -#define QM_PF_Q_BASE 0 -#define QM_PF_Q_NUM 64 -#define QM_VF_Q_NUM 15 -#define QM_Q_DEPTH 8 +#define HISI_QM_HW_V2 0x21 +#define HISI_QM_HW_V3 0x30 +#define HISI_QM_MAX_VFS_NUM 63 +#define HISI_QM_PF_Q_BASE 0 +#define HISI_QM_PF_Q_NUM 64 +#define HISI_QM_VF_Q_NUM 15 +#define HISI_QM_Q_DEPTH 8 +#define PHASE_DEFAULT_VAL 0x1 -#define QM_ABNML_INT_MASK 0x100004 -#define QM_ABNML_INT_MASK_CFG 0x7fff -#define QM_ABNML_INT_SRC 0x100000 -#define QM_HPRE_NFE_INT_MASK 0x6fb7 -#define QM_SEC_NFE_INT_MASK 0x6ff7 -#define QM_INVALID_DB BIT(12) -#define QM_REVISON_ID_BASE 0x1000dc -#define QM_REVISON_ID_MASK GENMASK_32(7, 0) -#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16)) -#define lower_32_bits(n) ((uint32_t)(n)) +#define HISI_QM_ABNML_INT_MASK 0x100004 +#define HISI_QM_ABNML_INT_MASK_CFG 0x7fff +#define HISI_QM_ABNML_INT_SRC 0x100000 +#define HISI_QM_HPRE_NFE_INT_MASK 0x6fb7 +#define HISI_QM_SEC_NFE_INT_MASK 0x6ff7 +#define HISI_QM_INVALID_DB BIT(12) +#define 
HISI_QM_REVISON_ID_BASE 0x1000dc +#define HISI_QM_REVISON_ID_MASK GENMASK_32(7, 0) #define POLL_PERIOD 10 #define POLL_TIMEOUT 1000 -#define QM_RECV_SYNC_TIMEOUT 0xfffffff -#define QM_ALIGN128 128 -#define QM_ALIGN32 32 +#define HISI_QM_RECV_SYNC_TIMEOUT 0xfffffff +#define HISI_QM_ALIGN128 128 +#define HISI_QM_ALIGN32 32 enum qm_fun_type { - QM_HW_PF, - QM_HW_VF, + HISI_QM_HW_PF, + HISI_QM_HW_VF, }; enum qm_sq_type { - QM_CHANNEL_TYPE0 = 0, - QM_CHANNEL_TYPE1, - QM_CHANNEL_TYPE2, + HISI_QM_CHANNEL_TYPE0 = 0, + HISI_QM_CHANNEL_TYPE1, + HISI_QM_CHANNEL_TYPE2, }; struct qm_sqc { @@ -141,13 +140,13 @@ struct hisi_qp { paddr_t sqe_dma; paddr_t cqe_dma; - int32_t (*fill_sqe)(void *sqe, void *msg); - int32_t (*parse_sqe)(void *sqe, void *msg); + TEE_Result (*fill_sqe)(void *sqe, void *msg); + TEE_Result (*parse_sqe)(void *sqe, void *msg); }; struct hisi_qm { enum qm_fun_type fun_type; - uintptr_t io_base; + vaddr_t io_base; uint32_t io_size; uint32_t vfs_num; uint32_t version; @@ -165,24 +164,24 @@ struct hisi_qm { struct hisi_qp *qp_array; struct mutex qp_lock; /* protect the qp instance */ - int32_t (*dev_status_check)(struct hisi_qm *qm); + TEE_Result (*dev_status_check)(struct hisi_qm *qm); }; -enum drvcrypt_status { - DRVCRYPT_NO_ERR = 0, - DRVCRYPT_FAIL = 1, - DRVCRYPT_EIO = 5, - DRVCRYPT_EAGAIN = 11, - DRVCRYPT_ENOMEM = 12, - DRVCRYPT_EFAULT = 14, - DRVCRYPT_EBUSY = 16, - DRVCRYPT_ENODEV = 19, - DRVCRYPT_EINVAL = 22, - DRVCRYPT_ETMOUT = 110, - DRVCRYPT_ENOPROC, - DRVCRYPT_IN_EPARA, - DRVCRYPT_VERIFY_ERR, - DRVCRYPT_HW_EACCESS, +enum hisi_drv_status { + HISI_QM_DRVCRYPT_NO_ERR = 0, + HISI_QM_DRVCRYPT_FAIL = 1, + HISI_QM_DRVCRYPT_EIO = 5, + HISI_QM_DRVCRYPT_EAGAIN = 11, + HISI_QM_DRVCRYPT_ENOMEM = 12, + HISI_QM_DRVCRYPT_EFAULT = 14, + HISI_QM_DRVCRYPT_EBUSY = 16, + HISI_QM_DRVCRYPT_ENODEV = 19, + HISI_QM_DRVCRYPT_EINVAL = 22, + HISI_QM_DRVCRYPT_ETMOUT = 110, + HISI_QM_DRVCRYPT_ENOPROC, + HISI_QM_DRVCRYPT_IN_EPARA, + HISI_QM_DRVCRYPT_VERIFY_ERR, + HISI_QM_DRVCRYPT_HW_EACCESS, }; #define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ @@ -199,12 +198,12 @@ enum drvcrypt_status { timeout += (_delay_us); \ udelay(_delay_us); \ } \ - (flag) ? 0 : -DRVCRYPT_ETMOUT; \ + (flag) ? 0 : HISI_QM_DRVCRYPT_ETMOUT; \ }) struct acc_device { struct hisi_qm qm; - uintptr_t io_base; + vaddr_t io_base; uint32_t io_size; uint32_t vfs_num; uint32_t endian; @@ -221,9 +220,9 @@ void hisi_qm_get_version(struct hisi_qm *qm); /** *@Description: Init QM for Kunpeng drv *@param qm: Handle of Queue Management module - *@return success: 0,fail: -DRVCRYPT_EBUSY/DRVCRYPT_EINVAL + *@return success: 0,fail: HISI_QM_DRVCRYPT_EBUSY/HISI_QM_DRVCRYPT_EINVAL */ -int32_t hisi_qm_init(struct hisi_qm *qm); +TEE_Result hisi_qm_init(struct hisi_qm *qm); /** *@Description:deinit QM for Kunpeng drv @@ -235,7 +234,7 @@ void hisi_qm_uninit(struct hisi_qm *qm); *@Description: Start QM for Kunpeng drv *@param qm: Handle of Queue Management module */ -int32_t hisi_qm_start(struct hisi_qm *qm); +TEE_Result hisi_qm_start(struct hisi_qm *qm); /** *@Description: Config QM for Kunpeng drv @@ -247,28 +246,29 @@ void hisi_qm_dev_init(struct hisi_qm *qm); *@Description: Create Queue Pair, allocated to PF/VF for configure * and service use. 
Each QP includes one SQ and one CQ *@param qm: Handle of Queue Management module + *@param sq_type: Accelerator specific algorithm type in sqc *@return success: Handle of QP,fail: NULL */ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, uint8_t sq_type); /** *@Description:Release Queue Pair - *@param qm: Handle of Queue Management module + *@param qp: Handle of Queue Pair */ void hisi_qm_release_qp(struct hisi_qp *qp); /** *@Description: Send SQE(Submmision Queue Element) to Kunpeng dev *@param qm: Handle of Queue Management module - *@return success: 0,fail: -DRVCRYPT_EINVAL + *@return success: 0,fail: HISI_QM_DRVCRYPT_EINVAL */ -int32_t hisi_qp_send(struct hisi_qp *qp, void *msg); +TEE_Result hisi_qp_send(struct hisi_qp *qp, void *msg); /** *@Description: Recevice result from Kunpeng dev *@param qm: Handle of Queue Management module - *@return success: 0,fail: -DRVCRYPT_EINVAL + *@return success: 0,fail: HISI_QM_DRVCRYPT_EINVAL */ -int32_t hisi_qp_recv_sync(struct hisi_qp *qp, void *msg); +TEE_Result hisi_qp_recv_sync(struct hisi_qp *qp, void *msg); #endif diff --git a/core/drivers/crypto/hisilicon/include/hisi_sec.h b/core/drivers/crypto/hisilicon/include/hisi_sec.h index a220b8a0aac..0e57540d533 100644 --- a/core/drivers/crypto/hisilicon/include/hisi_sec.h +++ b/core/drivers/crypto/hisilicon/include/hisi_sec.h @@ -336,6 +336,12 @@ enum hisi_buff_type { HISI_SGL_BUF, }; +/** + *@Description: Create Queue Pair for SEC, allocated to PF/VF for configure + * and service use. Each QP includes one SQ and one CQ + *@param sq_type: Accelerator specific algorithm type in sqc + *@return success: Handle of QP,fail: NULL + */ struct hisi_qp *hisi_sec_create_qp(uint8_t sq_type); #endif diff --git a/core/drivers/crypto/hisilicon/sec/hisi_sec.c b/core/drivers/crypto/hisilicon/sec/hisi_sec.c index a02f200f615..0e0317f4cde 100644 --- a/core/drivers/crypto/hisilicon/sec/hisi_sec.c +++ b/core/drivers/crypto/hisilicon/sec/hisi_sec.c @@ -30,7 +30,6 @@ #define SEC_RAS_FE_ENB_MASK 0x0 #define SEC_RAS_NFE_ENB_MASK 0x177 #define SEC_CLK_GATE_ENABLE BIT(3) -#define SEC_CLK_GATE_DISABLE (~BIT(3)) #define SEC_DYNAMIC_GATE_EN 0x7bff #define SEC_CORE_AUTO_GATE_EN GENMASK_32(3, 0) #define SEC_TRNG_EN_MASK BIT(8) @@ -58,8 +57,8 @@ struct hisi_qp *hisi_sec_create_qp(uint8_t sq_type) SLIST_FOREACH(cur_dev, &sec_list, link) { qm = &cur_dev->qm; - free_qp_num = (qm->fun_type == QM_HW_PF ? - QM_PF_Q_NUM : QM_VF_Q_NUM) - qm->qp_in_used; + free_qp_num = (qm->fun_type == HISI_QM_HW_PF ? 
+ HISI_QM_PF_Q_NUM : HISI_QM_VF_Q_NUM) - qm->qp_in_used; if (free_qp_num > max_qp_num) { max_qp_num = free_qp_num; sec_dev = cur_dev; @@ -76,36 +75,28 @@ struct hisi_qp *hisi_sec_create_qp(uint8_t sq_type) static void sec_disable_clock_gate(struct hisi_qm *qm) { - uint32_t val = 0; - - /* QM_HW_V2 version need to close clock gating */ - val = io_read32(qm->io_base + SEC_CONTROL_REG); - val &= SEC_CLK_GATE_DISABLE; - io_write32(qm->io_base + SEC_CONTROL_REG, val); + /* HISI_QM_HW_V2 version need to close clock gating */ + io_clrbits32(qm->io_base + SEC_CONTROL_REG, SEC_CLK_GATE_ENABLE); } static void sec_enable_clock_gate(struct hisi_qm *qm) { - uint32_t val = 0; - - if (qm->version == QM_HW_V2) + if (qm->version == HISI_QM_HW_V2) return; - val = io_read32(qm->io_base + SEC_CONTROL_REG); - val |= SEC_CLK_GATE_ENABLE; - io_write32(qm->io_base + SEC_CONTROL_REG, val); + io_setbits32(qm->io_base + SEC_CONTROL_REG, SEC_CLK_GATE_ENABLE); io_write32(qm->io_base + SEC_DYNAMIC_GATE_V3, SEC_DYNAMIC_GATE_EN); io_write32(qm->io_base + SEC_CORE_AUTO_GATE_V3, SEC_CORE_AUTO_GATE_EN); } -static int32_t sec_engine_init(struct acc_device *sec_dev) +static uint32_t sec_engine_init(struct acc_device *sec_dev) { struct hisi_qm *qm = &sec_dev->qm; uint32_t val = 0; - int32_t ret = 0; + uint32_t ret = 0; - if (qm->fun_type == QM_HW_VF) - return TEE_SUCCESS; + if (qm->fun_type == HISI_QM_HW_VF) + return HISI_QM_DRVCRYPT_NO_ERR; sec_disable_clock_gate(qm); hisi_qm_dev_init(qm); @@ -118,11 +109,9 @@ static int32_t sec_engine_init(struct acc_device *sec_dev) return ret; } - val = io_read32(qm->io_base + SEC_CONTROL_REG); - val |= SEC_TRNG_EN_MASK; - io_write32(qm->io_base + SEC_CONTROL_REG, val); + io_setbits32(qm->io_base + SEC_CONTROL_REG, SEC_TRNG_EN_MASK); - if (qm->version == QM_HW_V2) { + if (qm->version == HISI_QM_HW_V2) { /* smmu bypass */ io_write32(qm->io_base + SEC_INTERFACE_USER_CTRL0, SEC_USER0_CFG); @@ -150,40 +139,38 @@ static int32_t sec_engine_init(struct acc_device *sec_dev) io_write32(qm->io_base + SEC_RAS_NFE_ENABLE, SEC_RAS_NFE_ENB_MASK); io_write32(qm->io_base + SEC_CORE_INT_MASK, SEC_ABNML_INT_DISABLE); - val = io_read32(qm->io_base + SEC_CONTROL_REG); - val |= sec_dev->endian; - io_write32(qm->io_base + SEC_CONTROL_REG, val); + io_setbits32(qm->io_base + SEC_CONTROL_REG, sec_dev->endian); sec_enable_clock_gate(qm); - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; } -static int32_t sec_dev_status_check(struct hisi_qm *qm) +static uint32_t sec_dev_status_check(struct hisi_qm *qm) { uint32_t val = 0; val = io_read32(qm->io_base + SEC_CORE_INT_SOURCE); if (val & SEC_RAS_NFE_ENB_MASK) { - EMSG("SEC NFE RAS happened, need to reset.\n"); - return -DRVCRYPT_HW_EACCESS; + EMSG("SEC NFE RAS happened, need to reset"); + return HISI_QM_DRVCRYPT_HW_EACCESS; } - val = io_read32(qm->io_base + QM_ABNML_INT_SRC); + val = io_read32(qm->io_base + HISI_QM_ABNML_INT_SRC); if (val) { - if (val & QM_SEC_NFE_INT_MASK) - EMSG("QM NFE RAS happened, need to reset.\n"); + if (val & HISI_QM_SEC_NFE_INT_MASK) + EMSG("QM NFE RAS happened, need to reset"); - if (val & QM_INVALID_DB) { - EMSG("QM invalid db happened, please check.\n"); - io_write32(qm->io_base + QM_ABNML_INT_SRC, - QM_INVALID_DB); + if (val & HISI_QM_INVALID_DB) { + EMSG("QM invalid db happened, please check"); + io_write32(qm->io_base + HISI_QM_ABNML_INT_SRC, + HISI_QM_INVALID_DB); } - return -DRVCRYPT_HW_EACCESS; + return HISI_QM_DRVCRYPT_HW_EACCESS; } - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; } static int32_t sec_qm_init(struct 
acc_device *sec_dev) @@ -191,11 +178,11 @@ static int32_t sec_qm_init(struct acc_device *sec_dev) struct hisi_qm *qm = &sec_dev->qm; if (cpu_mmu_enabled()) { - qm->io_base = (uintptr_t)phys_to_virt_io(sec_dev->io_base, + qm->io_base = (vaddr_t)phys_to_virt_io(sec_dev->io_base, sec_dev->io_size); if (!qm->io_base) { EMSG("Fail to get qm io_base"); - return -DRVCRYPT_EFAULT; + return HISI_QM_DRVCRYPT_EFAULT; } } else { qm->io_base = sec_dev->io_base; @@ -205,11 +192,11 @@ static int32_t sec_qm_init(struct acc_device *sec_dev) qm->vfs_num = sec_dev->vfs_num; qm->sqe_size = SEC_SQE_SIZE; qm->sqe_log2_size = SEC_SQE_LOG2_SIZE; - if (qm->fun_type == QM_HW_PF) { + if (qm->fun_type == HISI_QM_HW_PF) { hisi_qm_get_version(qm); - IMSG("SEC hardware version is 0x%x\n", qm->version); - qm->qp_base = QM_PF_Q_BASE; - qm->qp_num = QM_PF_Q_NUM; + IMSG("SEC hardware version is 0x%" PRIx32, qm->version); + qm->qp_base = HISI_QM_PF_Q_BASE; + qm->qp_num = HISI_QM_PF_Q_NUM; qm->dev_status_check = sec_dev_status_check; } @@ -230,22 +217,22 @@ static struct acc_device *sec_pre_init(void) sec_dev->io_size = SEC_SIZE; sec_dev->vfs_num = 0; sec_dev->endian = SEC_LITTLE_ENDIAN; - sec_dev->fun_type = QM_HW_PF; + sec_dev->fun_type = HISI_QM_HW_PF; SLIST_INSERT_HEAD(&sec_list, sec_dev, link); return sec_dev; } -static uint32_t sec_probe(void) +static TEE_Result sec_probe(void) { struct acc_device *sec_dev = NULL; struct hisi_qm *qm = NULL; - int32_t ret = 0; + uint32_t ret = 0; - IMSG("SEC driver init start, version %s!\n", SEC_MODULE_VERSION); + IMSG("SEC driver init start, version %s", SEC_MODULE_VERSION); sec_dev = sec_pre_init(); if (!sec_dev) - return DRVCRYPT_ENOMEM; + return HISI_QM_DRVCRYPT_ENOMEM; qm = &sec_dev->qm; ret = sec_qm_init(sec_dev); @@ -267,7 +254,7 @@ static uint32_t sec_probe(void) } IMSG("SEC driver init done"); - return TEE_SUCCESS; + return HISI_QM_DRVCRYPT_NO_ERR; err_with_qm_init: hisi_qm_uninit(qm); @@ -275,7 +262,7 @@ static uint32_t sec_probe(void) SLIST_REMOVE_HEAD(&sec_list, link); free(sec_dev); - return DRVCRYPT_FAIL; + return HISI_QM_DRVCRYPT_FAIL; } driver_init(sec_probe); diff --git a/core/drivers/crypto/hisilicon/sec/sec_cipher.c b/core/drivers/crypto/hisilicon/sec/sec_cipher.c index 8e0d0ff4584..851688dbafd 100644 --- a/core/drivers/crypto/hisilicon/sec/sec_cipher.c +++ b/core/drivers/crypto/hisilicon/sec/sec_cipher.c @@ -2,22 +2,22 @@ /* * Copyright (c) 2022, HiSilicon Limited */ -#include "hisi_sec.h" #include "hisi_cipher.h" +#include "hisi_sec.h" static TEE_Result sec_do_cipher_task(struct hisi_qp *qp, void *msg) { - int32_t ret = 0; + uint32_t ret = 0; ret = hisi_qp_send(qp, msg); if (ret) { - EMSG("Fail to send task, ret=%d!\n", ret); + EMSG("Fail to send task, ret=%d", ret); return TEE_ERROR_BAD_STATE; } ret = hisi_qp_recv_sync(qp, msg); if (ret) { - EMSG("Recv task error, ret=%d!\n", ret); + EMSG("Recv task error, ret=%d", ret); return TEE_ERROR_BAD_STATE; } @@ -54,17 +54,18 @@ static void xts_multi_galois(unsigned char *data) data[0] ^= 0x87; } -static int sec_cipher_set_key(struct sec_cipher_ctx *c_ctx, - const uint8_t *key1, const int key1_len, - const uint8_t *key2, const int key2_len); +static TEE_Result sec_cipher_set_key(struct sec_cipher_ctx *c_ctx, + const uint8_t *key1, const int key1_len, + const uint8_t *key2, const int key2_len); -/* When the IV is delivered by segment, the AES/SM4-ECB is used +/* + * When the IV is delivered by segment, the AES/SM4-ECB is used * to update the IV to be used next time. 
*/ static uint32_t xts_iv_update(struct sec_cipher_ctx *c_ctx) { size_t xts_key_len = c_ctx->key_len / 2; - struct sec_cipher_ctx ecb_ctx = {0}; + struct sec_cipher_ctx ecb_ctx = { }; uint32_t ret = 0; size_t i = 0; @@ -84,7 +85,7 @@ static uint32_t xts_iv_update(struct sec_cipher_ctx *c_ctx) ret = sec_do_cipher_task(c_ctx->qp, &ecb_ctx); if (ret != 0) { - EMSG("Xts iv enc failed . ret = %x.\n", ret); + EMSG("Xts iv enc failed . ret = 0x%"PRIx32, ret); return ret; } @@ -94,7 +95,7 @@ static uint32_t xts_iv_update(struct sec_cipher_ctx *c_ctx) ecb_ctx.encrypt = false; ret = sec_do_cipher_task(c_ctx->qp, &ecb_ctx); if (ret != 0) - EMSG("Xts iv denc failed . ret = %x.\n", ret); + EMSG("Xts iv denc failed . ret = 0x%"PRIx32, ret); return ret; } @@ -125,7 +126,8 @@ static uint32_t sec_update_iv(struct sec_cipher_ctx *c_ctx) return ret; } -static int sec_cipher_des_get_c_key_len(const int key_len, uint8_t *c_key_len) +static TEE_Result sec_cipher_des_get_c_key_len(const int key_len, + uint8_t *c_key_len) { if (key_len == DES_KEY_SIZE) { *c_key_len = CKEY_LEN_DES; @@ -137,7 +139,8 @@ static int sec_cipher_des_get_c_key_len(const int key_len, uint8_t *c_key_len) return TEE_SUCCESS; } -static int sec_cipher_3des_get_c_key_len(const int key_len, uint8_t *c_key_len) +static TEE_Result sec_cipher_3des_get_c_key_len(const int key_len, + uint8_t *c_key_len) { if (key_len == SEC_3DES_2KEY_SIZE) { *c_key_len = CKEY_LEN_3DES_2KEY; @@ -151,8 +154,9 @@ static int sec_cipher_3des_get_c_key_len(const int key_len, uint8_t *c_key_len) return TEE_SUCCESS; } -static int sec_cipher_aes_get_c_key_len(const int key_len, - const uint8_t mode, uint8_t *c_key_len) +static TEE_Result sec_cipher_aes_get_c_key_len(const int key_len, + const uint8_t mode, + uint8_t *c_key_len) { switch (mode) { case C_MODE_ECB: @@ -195,8 +199,9 @@ static int sec_cipher_aes_get_c_key_len(const int key_len, return TEE_SUCCESS; } -static int sec_cipher_sm4_get_c_key_len(const int key_len, const uint8_t mode, - uint8_t *c_key_len) +static TEE_Result sec_cipher_sm4_get_c_key_len(const int key_len, + const uint8_t mode, + uint8_t *c_key_len) { switch (mode) { case C_MODE_ECB: @@ -223,13 +228,13 @@ static int sec_cipher_sm4_get_c_key_len(const int key_len, const uint8_t mode, return TEE_SUCCESS; } -static int sec_cipher_set_key(struct sec_cipher_ctx *c_ctx, - const uint8_t *key1, const int key1_len, - const uint8_t *key2, const int key2_len) +static TEE_Result sec_cipher_set_key(struct sec_cipher_ctx *c_ctx, + const uint8_t *key1, const int key1_len, + const uint8_t *key2, const int key2_len) { int key_len = key1_len + key2_len; uint8_t c_key_len = 0; - int ret = 0; + uint32_t ret = 0; switch (c_ctx->alg) { case C_ALG_DES: @@ -247,7 +252,7 @@ static int sec_cipher_set_key(struct sec_cipher_ctx *c_ctx, c_ctx->mode, &c_key_len); break; default: - EMSG("Invalid cipher type! 
%x\n", c_ctx->alg); + EMSG("Invalid cipher type 0x%"PRIx32, c_ctx->alg); ret = TEE_ERROR_NOT_IMPLEMENTED; break; } @@ -267,9 +272,10 @@ static int sec_cipher_set_key(struct sec_cipher_ctx *c_ctx, return ret; } -static int sec_cipher_iv_check(struct sec_cipher_ctx *c_ctx, const int iv_size) +static TEE_Result sec_cipher_iv_check(struct sec_cipher_ctx *c_ctx, + const int iv_size) { - int ret = 0; + TEE_Result ret = 0; switch (c_ctx->mode) { case C_MODE_ECB: @@ -296,15 +302,15 @@ static int sec_cipher_iv_check(struct sec_cipher_ctx *c_ctx, const int iv_size) } if (ret) - EMSG("iv_size check failed.\n"); + EMSG("iv_size check failed"); return ret; } -static int sec_cipher_set_iv(struct sec_cipher_ctx *c_ctx, const uint8_t *iv, - const int iv_len) +static TEE_Result sec_cipher_set_iv(struct sec_cipher_ctx *c_ctx, + const uint8_t *iv, const int iv_len) { - int ret = 0; + TEE_Result ret = 0; if (!iv && iv_len != 0) { EMSG("Iv is NULL"); @@ -324,7 +330,7 @@ static int sec_cipher_set_iv(struct sec_cipher_ctx *c_ctx, const uint8_t *iv, return TEE_SUCCESS; } -static int32_t sec_cipher_bd_fill(void *bd, void *msg) +static TEE_Result sec_cipher_bd_fill(void *bd, void *msg) { struct sec_cipher_ctx *c_ctx = (struct sec_cipher_ctx *)msg; struct hisi_sec_sqe *sqe = (struct hisi_sec_sqe *)bd; @@ -344,28 +350,27 @@ static int32_t sec_cipher_bd_fill(void *bd, void *msg) else sqe->cipher = CIPHER_DECRYPT; - sqe->type2.data_dst_addr_l = lower_32_bits(c_ctx->out_dma); - sqe->type2.data_dst_addr_h = upper_32_bits(c_ctx->out_dma); - sqe->type2.data_src_addr_l = lower_32_bits(c_ctx->in_dma); - sqe->type2.data_src_addr_h = upper_32_bits(c_ctx->in_dma); - sqe->type2.c_key_addr_l = lower_32_bits(c_ctx->key_dma); - sqe->type2.c_key_addr_h = upper_32_bits(c_ctx->key_dma); - + reg_pair_from_64(c_ctx->out_dma, &sqe->type2.data_dst_addr_h, + &sqe->type2.data_dst_addr_l); + reg_pair_from_64(c_ctx->in_dma, &sqe->type2.data_src_addr_h, + &sqe->type2.data_src_addr_l); + reg_pair_from_64(c_ctx->key_dma, &sqe->type2.c_key_addr_h, + &sqe->type2.c_key_addr_l); if (c_ctx->iv_len == 0) return TEE_SUCCESS; - sqe->type2.c_ivin_addr_l = lower_32_bits(c_ctx->iv_dma); - sqe->type2.c_ivin_addr_h = upper_32_bits(c_ctx->iv_dma); + reg_pair_from_64(c_ctx->iv_dma, &sqe->type2.c_ivin_addr_h, + &sqe->type2.c_ivin_addr_l); return TEE_SUCCESS; } -static int32_t sec_cipher_bd_parse(void *bd, void *msg __unused) +static TEE_Result sec_cipher_bd_parse(void *bd, void *msg __unused) { struct hisi_sec_sqe *sqe = (struct hisi_sec_sqe *)bd; if (sqe->type2.done != SEC_HW_TASK_DONE || sqe->type2.error_type) { - EMSG("SEC BD2 fail! 
done=0x%x, etype=0x%x\n", + EMSG("SEC BD2 fail done=0x%" PRIx32 ", etype=0x%" PRIx32, sqe->type2.done, sqe->type2.error_type); return TEE_ERROR_BAD_PARAMETERS; } @@ -373,7 +378,7 @@ static int32_t sec_cipher_bd_parse(void *bd, void *msg __unused) return TEE_SUCCESS; } -static int32_t sec_cipher_bd3_fill(void *bd, void *msg) +static TEE_Result sec_cipher_bd3_fill(void *bd, void *msg) { struct hisi_sec_bd3_sqe *sqe = (struct hisi_sec_bd3_sqe *)bd; struct sec_cipher_ctx *c_ctx = (struct sec_cipher_ctx *)msg; @@ -393,27 +398,28 @@ static int32_t sec_cipher_bd3_fill(void *bd, void *msg) else sqe->cipher = CIPHER_DECRYPT; - sqe->data_dst_addr_l = lower_32_bits(c_ctx->out_dma); - sqe->data_dst_addr_h = upper_32_bits(c_ctx->out_dma); - sqe->data_src_addr_l = lower_32_bits(c_ctx->in_dma); - sqe->data_src_addr_h = upper_32_bits(c_ctx->in_dma); - sqe->c_key_addr_l = lower_32_bits(c_ctx->key_dma); - sqe->c_key_addr_h = upper_32_bits(c_ctx->key_dma); + reg_pair_from_64(c_ctx->out_dma, &sqe->data_dst_addr_h, + &sqe->data_dst_addr_l); + reg_pair_from_64(c_ctx->in_dma, &sqe->data_src_addr_h, + &sqe->data_src_addr_l); + reg_pair_from_64(c_ctx->key_dma, &sqe->c_key_addr_h, + &sqe->c_key_addr_l); if (c_ctx->iv_len == 0) return TEE_SUCCESS; - sqe->ipsec_scene.c_ivin_addr_l = lower_32_bits(c_ctx->iv_dma); - sqe->ipsec_scene.c_ivin_addr_h = upper_32_bits(c_ctx->iv_dma); + reg_pair_from_64(c_ctx->iv_dma, &sqe->ipsec_scene.c_ivin_addr_h, + &sqe->ipsec_scene.c_ivin_addr_l); + return TEE_SUCCESS; } -static int32_t sec_cipher_bd3_parse(void *bd, void *msg __unused) +static TEE_Result sec_cipher_bd3_parse(void *bd, void *msg __unused) { struct hisi_sec_bd3_sqe *sqe = (struct hisi_sec_bd3_sqe *)bd; if (sqe->done != SEC_HW_TASK_DONE || sqe->error_type) { - EMSG("SEC BD3 fail! done=0x%x, etype=0x%x\n", + EMSG("SEC BD3 fail done=0x%" PRIx32 ", etype=0x%" PRIx32, sqe->done, sqe->error_type); return TEE_ERROR_BAD_PARAMETERS; } @@ -443,9 +449,10 @@ static TEE_Result cipher_algo_check(uint32_t algo) return TEE_SUCCESS; } -static int crypto_set_calg(struct sec_cipher_ctx *c_ctx, const uint32_t alg) +static TEE_Result crypto_set_calg(struct sec_cipher_ctx *c_ctx, + const uint32_t alg) { - int ret = TEE_SUCCESS; + TEE_Result ret = TEE_SUCCESS; switch (alg) { case TEE_MAIN_ALGO_DES: @@ -461,7 +468,7 @@ static int crypto_set_calg(struct sec_cipher_ctx *c_ctx, const uint32_t alg) c_ctx->alg = C_ALG_SM4; break; default: - EMSG("Invalid cipher type! %x\n", alg); + EMSG("Invalid cipher type 0x%"PRIx8, alg); ret = TEE_ERROR_NOT_IMPLEMENTED; break; } @@ -469,9 +476,10 @@ static int crypto_set_calg(struct sec_cipher_ctx *c_ctx, const uint32_t alg) return ret; } -static int crypto_set_cmode(struct sec_cipher_ctx *c_ctx, const uint32_t mode) +static TEE_Result crypto_set_cmode(struct sec_cipher_ctx *c_ctx, + const uint32_t mode) { - int ret = TEE_SUCCESS; + TEE_Result ret = TEE_SUCCESS; switch (mode) { case TEE_CHAIN_MODE_ECB_NOPAD: @@ -487,7 +495,7 @@ static int crypto_set_cmode(struct sec_cipher_ctx *c_ctx, const uint32_t mode) c_ctx->mode = C_MODE_CTR; break; default: - EMSG("Invalid cipher mode type! 
%x\n", mode); + EMSG("Invalid cipher mode type 0x%"PRIx32, mode); ret = TEE_ERROR_NOT_IMPLEMENTED; break; } @@ -498,7 +506,7 @@ static int crypto_set_cmode(struct sec_cipher_ctx *c_ctx, const uint32_t mode) static TEE_Result sec_cipher_alloc_ctx(void **ctx, uint32_t algo) { struct sec_cipher_ctx *c_ctx = NULL; - int ret = 0; + TEE_Result ret = 0; if (!ctx) { EMSG("Ctx is NULL"); @@ -523,13 +531,13 @@ static TEE_Result sec_cipher_alloc_ctx(void **ctx, uint32_t algo) if (ret) goto free_c_ctx; - c_ctx->qp = hisi_sec_create_qp(QM_CHANNEL_TYPE0); + c_ctx->qp = hisi_sec_create_qp(HISI_QM_CHANNEL_TYPE0); if (!c_ctx->qp) { ret = TEE_ERROR_BUSY; goto free_c_ctx; } - if (c_ctx->qp->qm->version == QM_HW_V2) { + if (c_ctx->qp->qm->version == HISI_QM_HW_V2) { c_ctx->qp->fill_sqe = sec_cipher_bd_fill; c_ctx->qp->parse_sqe = sec_cipher_bd_parse; } else { @@ -635,11 +643,11 @@ static TEE_Result sec_cipher_param_check(struct drvcrypt_cipher_update *dupdate) return TEE_SUCCESS; } -static int sec_alloc_buffer(struct sec_cipher_ctx *c_ctx) +static TEE_Result sec_alloc_buffer(struct sec_cipher_ctx *c_ctx) { c_ctx->in = (uint8_t *)malloc(c_ctx->len); if (!c_ctx->in) { - EMSG("Failed to alloc c_in buf.\n"); + EMSG("Failed to alloc c_in buf"); return TEE_ERROR_STORAGE_NO_SPACE; } @@ -648,7 +656,7 @@ static int sec_alloc_buffer(struct sec_cipher_ctx *c_ctx) c_ctx->out = (uint8_t *)malloc(c_ctx->len); if (!c_ctx->out) { - EMSG("Failed to alloc c_out buf.\n"); + EMSG("Failed to alloc c_out buf"); goto free_c_in; } @@ -698,7 +706,7 @@ static TEE_Result sec_cipher_update(struct drvcrypt_cipher_update *dupdate) ret = sec_update_iv(c_ctx); if (ret != 0) { - EMSG("sec_update_iv failed. ret = %x.\n", ret); + EMSG("sec_update_iv failed. ret = 0x%"PRIx32, ret); goto free_buffer; } @@ -766,7 +774,7 @@ static TEE_Result sec_init_cipher(void) TEE_Result ret = drvcrypt_register_cipher(&driver_cipher); if (ret != TEE_SUCCESS) - EMSG("Sec cipher register failed. ret = 0x%x.\n", ret); + EMSG("Sec cipher register failed. ret = 0x%" PRIx32, ret); return ret; }