Path: drivers/crypto/hisilicon/sec2/sec_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "sec.h"

#define CAP_FILE_PERMISSION		0444
#define SEC_VF_NUM			63
#define SEC_QUEUE_NUM_V1		4096
#define PCI_DEVICE_ID_HUAWEI_SEC_PF	0xa255

#define SEC_BD_ERR_CHK_EN0		0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1		0x7ffff7fd
#define SEC_BD_ERR_CHK_EN3		0xffffbfff

#define SEC_SQE_SIZE			128
#define SEC_PF_DEF_Q_NUM		256
#define SEC_PF_DEF_Q_BASE		0
#define SEC_CTX_Q_NUM_DEF		2
#define SEC_CTX_Q_NUM_MAX		32

#define SEC_CTRL_CNT_CLR_CE		0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT		BIT(0)
#define SEC_CORE_INT_SOURCE		0x301010
#define SEC_CORE_INT_MASK		0x301000
#define SEC_CORE_INT_STATUS		0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
#define SEC_ECC_NUM			16
#define SEC_ECC_MASH			0xFF
#define SEC_CORE_INT_DISABLE		0x0

#define SEC_RAS_CE_REG			0x301050
#define SEC_RAS_FE_REG			0x301054
#define SEC_RAS_NFE_REG			0x301058
#define SEC_RAS_FE_ENB_MSK		0x0
#define SEC_OOO_SHUTDOWN_SEL		0x301014
#define SEC_RAS_DISABLE			0x0
#define SEC_AXI_ERROR_MASK		(BIT(0) | BIT(1))

#define SEC_MEM_START_INIT_REG		0x301100
#define SEC_MEM_INIT_DONE_REG		0x301104

/* clock gating */
#define SEC_CONTROL_REG			0x301200
#define SEC_DYNAMIC_GATE_REG		0x30121c
#define SEC_CORE_AUTO_GATE		0x30212c
#define SEC_DYNAMIC_GATE_EN		0x7fff
#define SEC_CORE_AUTO_GATE_EN		GENMASK(3, 0)
#define SEC_CLK_GATE_ENABLE		BIT(3)
#define SEC_CLK_GATE_DISABLE		(~BIT(3))

#define SEC_TRNG_EN_SHIFT		8
#define SEC_AXI_SHUTDOWN_ENABLE		BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE	0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG	0x301220
#define SEC_INTERFACE_USER_CTRL1_REG	0x301224
#define SEC_SAA_EN_REG			0x301270
#define SEC_BD_ERR_CHK_EN_REG0		0x301380
#define SEC_BD_ERR_CHK_EN_REG1		0x301384
#define SEC_BD_ERR_CHK_EN_REG3		0x30138c

#define SEC_USER0_SMMU_NORMAL		(BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL		(BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_USER1_ENABLE_CONTEXT_SSV	BIT(24)
#define SEC_USER1_ENABLE_DATA_SSV	BIT(16)
#define SEC_USER1_WB_CONTEXT_SSV	BIT(8)
#define SEC_USER1_WB_DATA_SSV		BIT(0)
#define SEC_USER1_SVA_SET		(SEC_USER1_ENABLE_CONTEXT_SSV | \
					 SEC_USER1_ENABLE_DATA_SSV | \
					 SEC_USER1_WB_CONTEXT_SSV | \
					 SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA		(SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK		(~SEC_USER1_SVA_SET)
#define SEC_INTERFACE_USER_CTRL0_REG_V3	0x302220
#define SEC_INTERFACE_USER_CTRL1_REG_V3	0x302224
#define SEC_USER1_SMMU_NORMAL_V3	(BIT(23) | BIT(17) | BIT(11) | BIT(5))
#define SEC_USER1_SMMU_MASK_V3		0xFF79E79E
#define SEC_CORE_INT_STATUS_M_ECC	BIT(2)

#define SEC_PREFETCH_CFG		0x301130
#define SEC_SVA_TRANS			0x301EC4
#define SEC_PREFETCH_ENABLE		(~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE		BIT(1)
#define SEC_SVA_DISABLE_READY		(BIT(7) | BIT(11))
#define SEC_SVA_PREFETCH_INFO		0x301ED4
#define SEC_SVA_STALL_NUM		GENMASK(23, 8)
#define SEC_SVA_PREFETCH_NUM		GENMASK(2, 0)
#define SEC_WAIT_SVA_READY		500000
#define SEC_READ_SVA_STATUS_TIMES	3
#define SEC_WAIT_US_MIN			10
#define SEC_WAIT_US_MAX			20
#define SEC_WAIT_QP_US_MIN		1000
#define SEC_WAIT_QP_US_MAX		2000
#define SEC_MAX_WAIT_TIMES		2000

#define SEC_DELAY_10_US			10
#define SEC_POLL_TIMEOUT_US		1000
#define SEC_DBGFS_VAL_MAX_LEN		20
#define SEC_SINGLE_PORT_MAX_TRANS	0x2060

#define SEC_SQE_MASK_OFFSET		16
#define SEC_SQE_MASK_LEN		108
#define SEC_SHAPER_TYPE_RATE		400

#define SEC_DFX_BASE			0x301000
#define SEC_DFX_CORE			0x302100
#define SEC_DFX_COMMON1			0x301600
#define SEC_DFX_COMMON2			0x301C00
#define SEC_DFX_BASE_LEN		0x9D
#define SEC_DFX_CORE_LEN		0x32B
#define SEC_DFX_COMMON1_LEN		0x45
#define SEC_DFX_COMMON2_LEN		0xBA

#define SEC_ALG_BITMAP_SHIFT		32

#define SEC_CIPHER_BITMAP		(GENMASK_ULL(5, 0) | GENMASK_ULL(16, 12) | \
					 GENMASK(24, 21))
#define SEC_DIGEST_BITMAP		(GENMASK_ULL(11, 8) | GENMASK_ULL(20, 19) | \
					 GENMASK_ULL(42, 25))
#define SEC_AEAD_BITMAP			(GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
					 GENMASK_ULL(45, 43))

struct sec_hw_error {
	u32 int_msk;
	const char *msg;
};

struct sec_dfx_item {
	const char *name;
	u32 offset;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;

static struct hisi_qm_list sec_devices = {
	.register_to_crypto = sec_register_to_crypto,
	.unregister_from_crypto = sec_unregister_from_crypto,
};

static const struct hisi_qm_cap_info sec_basic_info[] = {
	{SEC_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C77, 0x7C77},
	{SEC_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC77, 0x6C77},
	{SEC_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
	{SEC_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
	{SEC_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x177, 0x60177},
	{SEC_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x177, 0x177},
	{SEC_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x4, 0x177},
	{SEC_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x88, 0xC088},
	{SEC_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x1, 0x1, 0x1},
	{SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
	{SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
	{SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
	{SEC_CORE_ENABLE_BITMAP, 0x3140, 0, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
	{SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x18670CF},
	{SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
	{SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE1_ALG_BITMAP_LOW, 0x3154, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE1_ALG_BITMAP_HIGH, 0x3158, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE2_ALG_BITMAP_LOW, 0x315c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE2_ALG_BITMAP_HIGH, 0x3160, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE3_ALG_BITMAP_LOW, 0x3164, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE3_ALG_BITMAP_HIGH, 0x3168, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE4_ALG_BITMAP_LOW, 0x316c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
};

static const struct hisi_qm_cap_query_info sec_cap_query_info[] = {
	{QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C77, 0x7C77},
	{QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},
	{QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
	{SEC_RAS_NFE_TYPE, "SEC_RAS_NFE_TYPE ", 0x3130, 0x0, 0x177, 0x60177},
	{SEC_RAS_NFE_RESET, "SEC_RAS_NFE_RESET ", 0x3134, 0x0, 0x177, 0x177},
	{SEC_RAS_CE_TYPE, "SEC_RAS_CE_TYPE ", 0x3138, 0x0, 0x88, 0xC088},
	{SEC_CORE_INFO, "SEC_CORE_INFO ", 0x313c, 0x110404, 0x110404, 0x110404},
	{SEC_CORE_EN, "SEC_CORE_EN ", 0x3140, 0x17F, 0x17F, 0xF},
	{SEC_DRV_ALG_BITMAP_LOW_TB, "SEC_DRV_ALG_BITMAP_LOW ",
		0x3144, 0x18050CB, 0x18050CB, 0x18670CF},
	{SEC_DRV_ALG_BITMAP_HIGH_TB, "SEC_DRV_ALG_BITMAP_HIGH ",
		0x3148, 0x395C, 0x395C, 0x395C},
	{SEC_ALG_BITMAP_LOW, "SEC_ALG_BITMAP_LOW ",
		0x314c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_ALG_BITMAP_HIGH, "SEC_ALG_BITMAP_HIGH ", 0x3150, 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE1_BITMAP_LOW, "SEC_CORE1_BITMAP_LOW ",
		0x3154, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE1_BITMAP_HIGH, "SEC_CORE1_BITMAP_HIGH ", 0x3158, 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE2_BITMAP_LOW, "SEC_CORE2_BITMAP_LOW ",
		0x315c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE2_BITMAP_HIGH, "SEC_CORE2_BITMAP_HIGH ", 0x3160, 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE3_BITMAP_LOW, "SEC_CORE3_BITMAP_LOW ",
		0x3164, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE3_BITMAP_HIGH, "SEC_CORE3_BITMAP_HIGH ", 0x3168, 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE4_BITMAP_LOW, "SEC_CORE4_BITMAP_LOW ",
		0x316c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE4_BITMAP_HIGH, "SEC_CORE4_BITMAP_HIGH ", 0x3170, 0x3FFF, 0x3FFF, 0x3FFF},
};

static const struct qm_dev_alg sec_dev_algs[] = { {
		.alg_msk = SEC_CIPHER_BITMAP,
		.alg = "cipher\n",
	}, {
		.alg_msk = SEC_DIGEST_BITMAP,
		.alg = "digest\n",
	}, {
		.alg_msk = SEC_AEAD_BITMAP,
		.alg = "aead\n",
	},
};

static const struct sec_hw_error sec_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "sec_axi_rresp_err_rint"
	},
	{
		.int_msk = BIT(1),
		.msg = "sec_axi_bresp_err_rint"
	},
	{
		.int_msk = BIT(2),
		.msg = "sec_ecc_2bit_err_rint"
	},
	{
		.int_msk = BIT(3),
		.msg = "sec_ecc_1bit_err_rint"
	},
	{
		.int_msk = BIT(4),
		.msg = "sec_req_trng_timeout_rint"
	},
	{
		.int_msk = BIT(5),
		.msg = "sec_fsm_hbeat_rint"
	},
	{
		.int_msk = BIT(6),
		.msg = "sec_channel_req_rng_timeout_rint"
	},
	{
		.int_msk = BIT(7),
		.msg = "sec_bd_err_rint"
	},
	{
		.int_msk = BIT(8),
		.msg = "sec_chain_buff_err_rint"
	},
	{
		.int_msk = BIT(14),
		.msg = "sec_no_secure_access"
	},
	{
		.int_msk = BIT(15),
		.msg = "sec_wrapping_key_auth_err"
	},
	{
		.int_msk = BIT(16),
		.msg = "sec_km_key_crc_fail"
	},
	{
		.int_msk = BIT(17),
		.msg = "sec_axi_poison_err"
	},
	{
		.int_msk = BIT(18),
		.msg = "sec_sva_err"
	},
	{}
};

static const char * const sec_dbg_file_name[] = {
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct sec_dfx_item sec_dfx_labels[] = {
	{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
	{"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
	{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
	{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
	{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

static const struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
	{"SEC_SAA_EN ", 0x301270},
	{"SEC_BD_LATENCY_MIN ", 0x301600},
	{"SEC_BD_LATENCY_MAX ", 0x301608},
	{"SEC_BD_LATENCY_AVG ", 0x30160C},
	{"SEC_BD_NUM_IN_SAA0 ", 0x301670},
	{"SEC_BD_NUM_IN_SAA1 ", 0x301674},
	{"SEC_BD_NUM_IN_SEC ", 0x301680},
	{"SEC_ECC_1BIT_CNT ", 0x301C00},
	{"SEC_ECC_1BIT_INFO ", 0x301C04},
	{"SEC_ECC_2BIT_CNT ", 0x301C10},
	{"SEC_ECC_2BIT_INFO ", 0x301C14},
	{"SEC_BD_SAA0 ", 0x301C20},
	{"SEC_BD_SAA1 ", 0x301C24},
	{"SEC_BD_SAA2 ", 0x301C28},
	{"SEC_BD_SAA3 ", 0x301C2C},
	{"SEC_BD_SAA4 ", 0x301C30},
	{"SEC_BD_SAA5 ", 0x301C34},
	{"SEC_BD_SAA6 ", 0x301C38},
	{"SEC_BD_SAA7 ", 0x301C3C},
	{"SEC_BD_SAA8 ", 0x301C40},
	{"SEC_RAS_CE_ENABLE ", 0x301050},
	{"SEC_RAS_FE_ENABLE ", 0x301054},
	{"SEC_RAS_NFE_ENABLE ", 0x301058},
	{"SEC_REQ_TRNG_TIME_TH ", 0x30112C},
	{"SEC_CHANNEL_RNG_REQ_THLD ", 0x302110},
};

/* define the SEC's dfx regs region and region length */
static struct dfx_diff_registers sec_diff_regs[] = {
	{
		.reg_offset = SEC_DFX_BASE,
		.reg_len = SEC_DFX_BASE_LEN,
	}, {
		.reg_offset = SEC_DFX_COMMON1,
		.reg_len = SEC_DFX_COMMON1_LEN,
	}, {
		.reg_offset = SEC_DFX_COMMON2,
		.reg_len = SEC_DFX_COMMON2_LEN,
	}, {
		.reg_offset = SEC_DFX_CORE,
		.reg_len = SEC_DFX_CORE_LEN,
	},
};

static int sec_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(sec_diff_regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);

static bool pf_q_num_flag;
static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	pf_q_num_flag = true;

	return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

	if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
		return -EINVAL;
	}

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
	.set = sec_ctx_q_num_set,
	.get = param_get_int,
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
	hisi_qm_free_qps(qps, qp_num);
	kfree(qps);
}

struct hisi_qp **sec_create_qps(void)
{
	int node = cpu_to_node(raw_smp_processor_id());
	u32 ctx_num = ctx_q_num;
	struct hisi_qp **qps;
	int ret;

	qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
	if (!qps)
		return NULL;

	ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
	if (!ret)
		return qps;

	kfree(qps);
	return NULL;
}

u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
{
	u32 cap_val_h, cap_val_l;

	cap_val_h = qm->cap_tables.dev_cap_table[high].cap_val;
	cap_val_l = qm->cap_tables.dev_cap_table[low].cap_val;

	return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l;
}

static const struct kernel_param_ops sec_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means sec only register to crypto,
 * uacce_mode = 1 means sec both register to crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);

static const struct pci_device_id sec_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

static void sec_set_endian(struct hisi_qm *qm)
{
	u32 reg;

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg &= ~(BIT(1) | BIT(0));
	if (!IS_ENABLED(CONFIG_64BIT))
		reg |= BIT(1);

	if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
		reg |= BIT(0);

	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}

static int sec_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask)
{
	u32 val, try_times = 0;
	u8 count = 0;

	/*
	 * Read the register value every 10-20us. If the value is 0 for three
	 * consecutive times, the SVA module is ready.
	 */
	do {
		val = readl(qm->io_base + offset);
		if (val & mask)
			count = 0;
		else if (++count == SEC_READ_SVA_STATUS_TIMES)
			break;

		usleep_range(SEC_WAIT_US_MIN, SEC_WAIT_US_MAX);
	} while (++try_times < SEC_WAIT_SVA_READY);

	if (try_times == SEC_WAIT_SVA_READY) {
		pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void sec_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val |= SEC_PREFETCH_DISABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
					 val, !(val & SEC_SVA_DISABLE_READY),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");

	(void)sec_wait_sva_ready(qm, SEC_SVA_PREFETCH_INFO, SEC_SVA_STALL_NUM);
}

static void sec_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val &= SEC_PREFETCH_ENABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
					 val, !(val & SEC_PREFETCH_DISABLE),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret) {
		pci_err(qm->pdev, "failed to open sva prefetch\n");
		sec_close_sva_prefetch(qm);
		return;
	}

	ret = sec_wait_sva_ready(qm, SEC_SVA_TRANS, SEC_SVA_PREFETCH_NUM);
	if (ret)
		sec_close_sva_prefetch(qm);
}

static void sec_engine_sva_config(struct hisi_qm *qm)
{
	u32 reg;

	if (qm->ver > QM_HW_V2) {
		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG_V3);
		reg |= SEC_USER0_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG_V3);

		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG_V3);
		reg &= SEC_USER1_SMMU_MASK_V3;
		reg |= SEC_USER1_SMMU_NORMAL_V3;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG_V3);
	} else {
		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG);
		reg |= SEC_USER0_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG);
		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG);
		reg &= SEC_USER1_SMMU_MASK;
		if (qm->use_sva)
			reg |= SEC_USER1_SMMU_SVA;
		else
			reg |= SEC_USER1_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG);
	}
	sec_open_sva_prefetch(qm);
}

static void sec_enable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val |= SEC_CLK_GATE_ENABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);

	val = readl(qm->io_base + SEC_DYNAMIC_GATE_REG);
	val |= SEC_DYNAMIC_GATE_EN;
	writel(val, qm->io_base + SEC_DYNAMIC_GATE_REG);

	val = readl(qm->io_base + SEC_CORE_AUTO_GATE);
	val |= SEC_CORE_AUTO_GATE_EN;
	writel(val, qm->io_base + SEC_CORE_AUTO_GATE);
}

static void sec_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* Kunpeng920 needs to close clock gating */
	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val &= SEC_CLK_GATE_DISABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
}

static int sec_engine_init(struct hisi_qm *qm)
{
	int ret;
	u32 reg;

	/* disable clock gate control before mem init */
	sec_disable_clock_gate(qm);

	writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
					 reg, reg & 0x1, SEC_DELAY_10_US,
					 SEC_POLL_TIMEOUT_US);
	if (ret) {
		pci_err(qm->pdev, "fail to init sec mem\n");
		return ret;
	}

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg |= (0x1 << SEC_TRNG_EN_SHIFT);
	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);

	sec_engine_sva_config(qm);

	writel(SEC_SINGLE_PORT_MAX_TRANS,
	       qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);

	reg = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CORE_ENABLE_BITMAP, qm->cap_ver);
	writel(reg, qm->io_base + SEC_SAA_EN_REG);

	if (qm->ver < QM_HW_V3) {
		/* HW V2 enable sm4 extra mode, as ctr/ecb */
		writel_relaxed(SEC_BD_ERR_CHK_EN0,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG0);

		/* HW V2 enable sm4 xts mode multiple iv */
		writel_relaxed(SEC_BD_ERR_CHK_EN1,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
		writel_relaxed(SEC_BD_ERR_CHK_EN3,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
	}

	/* config endian */
	sec_set_endian(qm);

	sec_enable_clock_gate(qm);

	return 0;
}

static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
	/* qm user domain */
	writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME(bus master enable) */
	writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

	/* enable sqc,cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

	return sec_engine_init(qm);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
	int i;

	/* clear sec dfx regs */
	writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		readl(qm->io_base + sec_dfx_regs[i].offset);

	/* clear rdclr_en */
	writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + SEC_CONTROL_REG);
	if (enable) {
		val1 |= SEC_AXI_SHUTDOWN_ENABLE;
		val2 = qm->err_info.dev_err.shutdown_mask;
	} else {
		val1 &= SEC_AXI_SHUTDOWN_DISABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_enable(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;

	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		pci_info(qm->pdev, "V1 not support hw error handle\n");
		return;
	}

	/* clear SEC hw error source if having */
	writel(err_mask, qm->io_base + SEC_CORE_INT_SOURCE);

	/* enable RAS int */
	writel(dev_err->ce, qm->io_base + SEC_RAS_CE_REG);
	writel(dev_err->fe, qm->io_base + SEC_RAS_FE_REG);
	writel(dev_err->nfe, qm->io_base + SEC_RAS_NFE_REG);

	/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, true);

	/* enable SEC hw error interrupts */
	writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
	/* disable SEC hw error interrupts */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* disable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, false);

	/* disable RAS int */
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
}

static u32 sec_clear_enable_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
			SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

	if (val != 1 && val)
		return -EINVAL;

	tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	struct hisi_qm *qm = file->qm;
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(qm);
		break;
	default:
		goto err_input;
	}

	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	struct hisi_qm *qm = file->qm;
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= SEC_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		ret = sec_clear_enable_write(qm, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}

static const struct file_operations sec_dbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sec_debug_read,
	.write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

static int sec_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
			 sec_debugfs_atomic64_set, "%lld\n");

static int sec_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sec_regs);

static int sec_cap_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;
	u32 i, size;

	size = qm->cap_tables.qm_cap_size;
	for (i = 0; i < size; i++)
		seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
			   qm->cap_tables.qm_cap_table[i].cap_val);

	size = qm->cap_tables.dev_cap_size;
	for (i = 0; i < size; i++)
		seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
			   qm->cap_tables.dev_cap_table[i].cap_val);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sec_cap_regs);

static int sec_core_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs;
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i;

	tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;
	regset->dev = dev;

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF)
		debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops);
	if (qm->fun_type == QM_HW_PF && sec_regs)
		debugfs_create_file("diff_regs", 0444, tmp_d,
				    qm, &sec_diff_regs_fops);

	for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
		atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
						  sec_dfx_labels[i].offset);
		debugfs_create_file(sec_dfx_labels[i].name, 0644,
				    tmp_d, data, &sec_atomic64_ops);
	}

	debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
			    qm->debug.debug_root, qm, &sec_cap_regs_fops);

	return 0;
}

static int sec_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	int i;

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) {
		for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
			spin_lock_init(&sec->debug.files[i].lock);
			sec->debug.files[i].index = i;
			sec->debug.files[i].qm = qm;

			debugfs_create_file(sec_dbg_file_name[i], 0600,
					    qm->debug.debug_root,
					    sec->debug.files + i,
					    &sec_dbg_fops);
		}
	}

	return sec_core_debug_init(qm);
}

static int sec_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init SEC diff regs!\n");
		return ret;
	}

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;

	hisi_qm_debug_init(qm);

	ret = sec_debug_init(qm);
	if (ret)
		goto debugfs_remove;

	return 0;

debugfs_remove:
	debugfs_remove_recursive(qm->debug.debug_root);
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
	return ret;
}

static void sec_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);

	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
}

static int sec_show_last_regs_init(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	int i;

	debug->last_words = kcalloc(ARRAY_SIZE(sec_dfx_regs),
				    sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		debug->last_words[i] = readl_relaxed(qm->io_base +
						     sec_dfx_regs[i].offset);

	return 0;
}

static void sec_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}

static void sec_show_last_dfx_regs(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	u32 val;
	int i;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* dumps last word of the debugging registers during controller reset */
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) {
		val = readl_relaxed(qm->io_base + sec_dfx_regs[i].offset);
		if (val != debug->last_words[i])
			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
				 sec_dfx_regs[i].name, debug->last_words[i], val);
	}
}

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
				err_val = readl(qm->io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
					((err_val) >> SEC_ECC_NUM) &
					SEC_ECC_MASH);
			}
		}
		errs++;
	}
}

static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
	u32 nfe_mask = qm->err_info.dev_err.nfe;

	writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG);
}

static void sec_enable_error_report(struct hisi_qm *qm)
{
	u32 nfe_mask = qm->err_info.dev_err.nfe;
	u32 ce_mask = qm->err_info.dev_err.ce;

	writel(nfe_mask, qm->io_base + SEC_RAS_NFE_REG);
	writel(ce_mask, qm->io_base + SEC_RAS_CE_REG);
}

static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + SEC_CONTROL_REG);
	writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
	writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}

static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
{
	u32 err_status;

	err_status = sec_get_hw_err_status(qm);
	if (err_status) {
		if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
			qm->err_status.is_dev_ecc_mbit = true;
		sec_log_hw_error(qm, err_status);

		if (err_status & qm->err_info.dev_err.reset_mask) {
			/* Disable the same error reporting until device is recovered. */
			sec_disable_error_report(qm, err_status);
			return ACC_ERR_NEED_RESET;
		}
		sec_clear_hw_err_status(qm, err_status);
		/* Avoid firmware disable error report, re-enable. */
		sec_enable_error_report(qm);
	}

	return ACC_ERR_RECOVERED;
}

static bool sec_dev_is_abnormal(struct hisi_qm *qm)
{
	u32 err_status;

	err_status = sec_get_hw_err_status(qm);
	if (err_status & qm->err_info.dev_err.shutdown_mask)
		return true;

	return false;
}

static void sec_disable_axi_error(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;

	writel(err_mask & ~SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_MASK);

	if (qm->ver > QM_HW_V2)
		writel(dev_err->shutdown_mask & (~SEC_AXI_ERROR_MASK),
		       qm->io_base + SEC_OOO_SHUTDOWN_SEL);
}

static void sec_enable_axi_error(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;

	/* clear axi error source */
	writel(SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_SOURCE);

	writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);

	if (qm->ver > QM_HW_V2)
		writel(dev_err->shutdown_mask, qm->io_base + SEC_OOO_SHUTDOWN_SEL);
}

static void sec_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;
	struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
	struct hisi_qm_err_mask *dev_err = &err_info->dev_err;

	qm_err->fe = SEC_RAS_FE_ENB_MSK;
	qm_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
	qm_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
	qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
						    SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	qm_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
						 SEC_QM_RESET_MASK_CAP, qm->cap_ver);
	qm_err->ecc_2bits_mask = QM_ECC_MBIT;

	dev_err->fe = SEC_RAS_FE_ENB_MSK;
	dev_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
	dev_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
	dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
						     SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	dev_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
						  SEC_RESET_MASK_CAP, qm->cap_ver);
	dev_err->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;

	err_info->msi_wr_port = BIT(0);
	err_info->acpi_rst = "SRST";
}

static const struct hisi_qm_err_ini sec_err_ini = {
	.hw_init = sec_set_user_domain_and_cache,
	.hw_err_enable = sec_hw_error_enable,
	.hw_err_disable = sec_hw_error_disable,
	.get_dev_hw_err_status = sec_get_hw_err_status,
	.clear_dev_hw_err_status = sec_clear_hw_err_status,
	.open_axi_master_ooo = sec_open_axi_master_ooo,
	.open_sva_prefetch = sec_open_sva_prefetch,
	.close_sva_prefetch = sec_close_sva_prefetch,
	.show_last_dfx_regs = sec_show_last_dfx_regs,
	.err_info_init = sec_err_info_init,
	.get_err_result = sec_get_err_result,
	.dev_is_abnormal = sec_dev_is_abnormal,
	.disable_axi_error = sec_disable_axi_error,
	.enable_axi_error = sec_enable_axi_error,
};

static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	ret = sec_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hisi_qm_dev_err_init(qm);
	sec_debug_regs_clear(qm);
	ret = sec_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}

static int sec_pre_store_cap_reg(struct hisi_qm *qm)
{
	struct hisi_qm_cap_record *sec_cap;
	struct pci_dev *pdev = qm->pdev;
	size_t i, size;

	size = ARRAY_SIZE(sec_cap_query_info);
	sec_cap = devm_kcalloc(&pdev->dev, size, sizeof(*sec_cap), GFP_KERNEL);
	if (!sec_cap)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		sec_cap[i].type = sec_cap_query_info[i].type;
		sec_cap[i].name = sec_cap_query_info[i].name;
		sec_cap[i].cap_val = hisi_qm_get_cap_value(qm, sec_cap_query_info,
							   i, qm->cap_ver);
	}

	qm->cap_tables.dev_cap_table = sec_cap;
	qm->cap_tables.dev_cap_size = size;

	return 0;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	u64 alg_msk;
	int ret;

	qm->pdev = pdev;
	qm->mode = uacce_mode;
	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &sec_devices;
		qm->err_ini = &sec_err_ini;
		if (pf_q_num_flag)
			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * have no way to get qm configure in VM in v1 hardware,
		 * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force
		 * to trigger only one VF in v1 hardware.
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = SEC_PF_DEF_Q_NUM;
		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to init sec qm configures!\n");
		return ret;
	}

	/* Fetch and save the value of capability registers */
	ret = sec_pre_store_cap_reg(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
		hisi_qm_uninit(qm);
		return ret;
	}

	alg_msk = sec_get_alg_bitmap(qm, SEC_ALG_BITMAP_HIGH, SEC_ALG_BITMAP_LOW);
	ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs));
	if (ret) {
		pci_err(qm->pdev, "Failed to set sec algs!\n");
		hisi_qm_uninit(qm);
	}

	return ret;
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
	u32 type_rate = SEC_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = sec_pf_probe_init(sec);
		if (ret)
			return ret;
		/* enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	sec_debug_regs_clear(qm);
	sec_show_last_regs_uninit(qm);
	sec_close_sva_prefetch(qm);
	hisi_qm_dev_err_uninit(qm);
}

static void sec_iommu_used_check(struct sec_dev *sec)
{
	struct iommu_domain *domain;
	struct device *dev = &sec->qm.pdev->dev;

	domain = iommu_get_domain_for_dev(dev);

	/* Check if iommu is used */
	sec->iommu_used = false;
	if (domain) {
		if (domain->type & __IOMMU_DOMAIN_PAGING)
			sec->iommu_used = true;
		dev_info(dev, "SMMU Opened, the iommu type = %u\n",
			 domain->type);
	}
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	qm = &sec->qm;
	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
		return ret;
	}

	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);

	ret = sec_probe_init(sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	ret = sec_debugfs_init(qm);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	hisi_qm_add_list(qm, &sec_devices);
	ret = hisi_qm_alg_register(qm, &sec_devices, ctx_q_num);
	if (ret < 0) {
		pr_err("Failed to register driver to crypto.\n");
		goto err_qm_del_list;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_alg_unregister;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_alg_unregister;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_alg_unregister:
	hisi_qm_alg_unregister(qm, &sec_devices, ctx_q_num);
err_qm_del_list:
	hisi_qm_del_list(qm, &sec_devices);
	sec_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
	sec_probe_uninit(qm);
err_qm_uninit:
	sec_qm_uninit(qm);
	return ret;
}

static void sec_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &sec_devices);
	hisi_qm_alg_unregister(qm, &sec_devices, ctx_q_num);
	hisi_qm_del_list(qm, &sec_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	sec_debugfs_exit(qm);

	(void)hisi_qm_stop(qm, QM_NORMAL);
	sec_probe_uninit(qm);

	sec_qm_uninit(qm);
}

static const struct dev_pm_ops sec_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

static const struct pci_error_handlers sec_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};

static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
			hisi_qm_sriov_configure : NULL,
	.shutdown = hisi_qm_dev_shutdown,
	.driver.pm = &sec_pm_ops,
};

struct pci_driver *hisi_sec_get_pf_driver(void)
{
	return &sec_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_sec_get_pf_driver);

static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
	int ret;

	hisi_qm_init_list(&sec_devices);
	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <[email protected]>");
MODULE_AUTHOR("Longfang Liu <[email protected]>");
MODULE_AUTHOR("Kai Ye <[email protected]>");
MODULE_AUTHOR("Wei Zhang <[email protected]>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");