@@ -105,6 +105,31 @@ static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
}
#ifdef CONFIG_SCSI_UFS_CRYPTO
+/**
+ * ufs_qcom_config_ice_allocator() - Configure the ICE core allocator
+ *
+ * @host: pointer to the Qualcomm-specific variant structure.
+ */
+static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host)
+{
+ struct ufs_hba *hba = host->hba;
+ unsigned int val[4] = { NUM_RX_R1W0, NUM_TX_R0W1, NUM_RX_R1W1, NUM_TX_R1W1 };
+ unsigned int config;
+
+ if (!is_ice_config_supported(host))
+ return;
+
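+ /*
+ * Pack one core allocation count per byte, LSB first. With the counts
+ * defined in ufs-qcom.h this yields 0x0D0F1C1C.
+ */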
+ config = val[0] | (val[1] << 8) | (val[2] << 16) | (val[3] << 24);
+
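+ /* Allocator type 2 shares the AES cores between the TX and RX streams */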
+ ufshcd_writel(hba, ICE_ALLOCATOR_TYPE, REG_UFS_MEM_ICE_CONFIG);
+ ufshcd_writel(hba, config, REG_UFS_MEM_ICE_NUM_CORE);
+}
+
static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
{
@@ -196,6 +221,11 @@ static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
{
return 0;
}
+
+static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host)
+{
+}
+
#endif
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
@@ -435,6 +465,9 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
err = ufs_qcom_enable_lane_clks(host);
break;
case POST_CHANGE:
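+ /* Controller is now enabled; configure the ICE core allocator */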
+ ufs_qcom_config_ice_allocator(host);
+
/* check if UFS PHY moved from DISABLED to HIBERN8 */
err = ufs_qcom_check_hibern8(hba);
ufs_qcom_enable_hw_clk_gating(hba);
@@ -932,6 +965,15 @@ static void ufs_qcom_set_host_params(struct ufs_hba *hba)
host_params->hs_tx_gear = host_params->hs_rx_gear = ufs_qcom_get_hs_gear(hba);
}
+static void ufs_qcom_set_host_caps(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
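+ /* ICE allocator configuration is supported on controller v5 and later */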
+ if (host->hw_ver.major >= 0x5)
+ host->caps |= UFS_QCOM_CAP_ICE_CONFIG;
+}
+
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
@@ -940,6 +982,8 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_WB_EN;
hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
+ ufs_qcom_set_host_caps(hba);
}
/**
@@ -196,7 +196,9 @@ struct ufs_qcom_host {
#ifdef CONFIG_SCSI_UFS_CRYPTO
struct qcom_ice *ice;
#endif
-
+#define UFS_QCOM_CAP_ICE_CONFIG BIT(0)
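+ /* Bitmask of UFS_QCOM_CAP_* capabilities */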
+ u32 caps;
void __iomem *dev_ref_clk_ctrl_mmio;
bool is_dev_ref_clk_enabled;
struct ufs_hw_version hw_ver;
@@ -226,6 +228,48 @@ ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg);
};
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+static inline bool is_ice_config_supported(struct ufs_qcom_host *host)
+{
+ return host->caps & UFS_QCOM_CAP_ICE_CONFIG;
+}
+
+/* ICE configuration to share the AES engines between the TX and RX streams */
+#define ICE_ALLOCATOR_TYPE 2
+#define REG_UFS_MEM_ICE_CONFIG 0x260C
+#define REG_UFS_MEM_ICE_NUM_CORE 0x2664
+
+/*
+ * Number of cores allocated to the RX stream when a read data block is
+ * received and no write data block is in progress
+ */
+#define NUM_RX_R1W0 28
+
+/*
+ * Number of cores allocated to the TX stream when the device requests a
+ * write data block and no read data block is in progress
+ */
+#define NUM_TX_R0W1 28
+
+/*
+ * Number of cores allocated to the RX stream when a read data block is
+ * received and a write data block is in progress
+ * OR
+ * the device requests a write data block and a read data block is in progress
+ */
+#define NUM_RX_R1W1 15
+
+/*
+ * Number of cores allocated to the TX stream (UFS write) when a read data
+ * block is received and a write data block is in progress
+ * OR
+ * the device requests a write data block and a read data block is in progress
+ */
+#define NUM_TX_R1W1 13
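+/* NUM_RX_R1W1 + NUM_TX_R1W1 equals the 28 cores a single active stream gets */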
+
+#endif /* CONFIG_SCSI_UFS_CRYPTO */
+
#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)