#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
#define UFS_MCQ_MIN_POLL_QUEUES 0
+#define MAX_DEV_CMD_ENTRIES 2
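+/* MAC field of the MCQ Config register (REG_UFS_MCQ_CFG) */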
+#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
+
static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
MODULE_PARM_DESC(poll_queues,
"Number of poll queues used for r/w. Default value is 1");
+/**
+ * ufshcd_mcq_decide_queue_depth - decide the queue depth
+ * @hba: per adapter instance
+ *
+ * Return: queue depth on success; a negative error code on failure
+ *
+ * MAC - Max. Active Commands supported by the Host Controller (HC).
+ * The HC won't send more than this many commands to the device.
+ * It is mandatory to implement get_hba_mac() to enable MCQ mode.
+ * Calculates and adjusts the queue depth based on the depth
+ * supported by the HC and the UFS device.
+ */
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
+{
+ int mac;
+
+ /* Mandatory to implement get_hba_mac() */
+ mac = ufshcd_mcq_vops_get_hba_mac(hba);
+ if (mac < 0) {
+ dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+ return mac;
+ }
+
+ WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
+ /*
+ * max. value of bqueuedepth = 256; mac is host dependent.
+ * It is mandatory for the UFS device to define bQueueDepth if
+ * the shared queueing architecture is enabled.
+ */
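+ /* e.g. get_hba_mac() = 64 and bQueueDepth = 32 -> queue depth = 32 */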
+ return min_t(int, mac, hba->dev_info.bqueuedepth);
+}
+
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{
int i;
enum flag_idn idn, u8 index, bool *flag_res);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
int ufshcd_mcq_init(struct ufs_hba *hba);
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
#define SD_ASCII_STD true
#define SD_RAW false
return -EOPNOTSUPP;
}
+static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->get_hba_mac)
+ return hba->vops->get_hba_mac(hba);
+
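+ /* Without a vendor get_hba_mac() implementation, MCQ cannot be enabled */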
+ return -EOPNOTSUPP;
+}
+
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
/* getting Specification Version in big endian format */
dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+ dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{
- return ufshcd_mcq_init(hba);
+ int ret;
+ int old_nutrs = hba->nutrs;
+
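+ /* Determine the MCQ queue depth and override hba->nutrs with it */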
+ ret = ufshcd_mcq_decide_queue_depth(hba);
+ if (ret < 0)
+ return ret;
+
+ hba->nutrs = ret;
+ ret = ufshcd_mcq_init(hba);
+ if (ret) {
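+ /* Restore the original queue depth if MCQ initialization fails */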
+ hba->nutrs = old_nutrs;
+ return ret;
+ }
+
+ return 0;
}
static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
return ret;
}
+static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
+{
+ /* Qualcomm HC supports up to 64 active commands */
+ return MAX_SUPP_MAC;
+}
+
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
.program_key = ufs_qcom_ice_program_key,
.reinit_notify = ufs_qcom_reinit_notify,
.mcq_config_resource = ufs_qcom_mcq_config_resource,
+ .get_hba_mac = ufs_qcom_get_hba_mac,
};
/**
#define HBRN8_POLL_TOUT_MS 100
#define DEFAULT_CLK_RATE_HZ 1000000
#define BUS_VECTOR_NAME_LEN 32
+#define MAX_SUPP_MAC 64
#define UFS_HW_VER_MAJOR_MASK GENMASK(31, 28)
#define UFS_HW_VER_MINOR_MASK GENMASK(27, 16)
u8 *model;
u16 wspecversion;
u32 clk_gating_wait_us;
+ /* Stores the queue depth supported by the UFS device */
+ u8 bqueuedepth;
/* UFS HPB related flag */
bool hpb_enabled;
* @event_notify: called to notify important events
* @reinit_notify: called to notify reinit of UFSHCD during max gear switch
* @mcq_config_resource: called to configure MCQ platform resources
+ * @get_hba_mac: called to get the vendor-specific MAC value; mandatory for MCQ mode
*/
struct ufs_hba_variant_ops {
const char *name;
enum ufs_event_type evt, void *data);
void (*reinit_notify)(struct ufs_hba *);
int (*mcq_config_resource)(struct ufs_hba *hba);
+ int (*get_hba_mac)(struct ufs_hba *hba);
};
/* clock gating state */
REG_UFS_CCAP = 0x100,
REG_UFS_CRYPTOCAP = 0x104,
+ REG_UFS_MCQ_CFG = 0x380,
UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
};