static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
- int err = 0;
+ int err;
struct device *dev = host->hba->dev;
if (host->is_lane_clks_enabled)
return 0;
err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
host->rx_l0_sync_clk);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
host->tx_l0_sync_clk);
goto disable_rx_l1;
host->is_lane_clks_enabled = true;
- goto out;
+
+ return 0;
disable_rx_l1:
clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
clk_disable_unprepare(host->rx_l0_sync_clk);
-out:
+
return err;
}
err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
&host->rx_l0_sync_clk, false);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
&host->tx_l0_sync_clk, false);
if (err)
- goto out;
+ return err;
/* In case of single lane per direction, don't read lane1 clocks */
if (host->hba->lanes_per_direction > 1) {
err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
&host->rx_l1_sync_clk, false);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
&host->tx_l1_sync_clk, true);
}
-out:
- return err;
+
+ return 0;
}
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
if (!host->core_reset) {
dev_warn(hba->dev, "%s: reset control not set\n", __func__);
- goto out;
+ return 0;
}
reenable_intr = hba->is_irq_enabled;
if (ret) {
dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
__func__, ret);
- goto out;
+ return ret;
}
/*
hba->is_irq_enabled = true;
}
-out:
- return ret;
+ return 0;
}
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
- int ret = 0;
+ int ret;
bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;
/* Reset UFS Host Controller and PHY */
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
- goto out;
+ return ret;
}
/* power on phy - start serdes and phy's power and clocks */
out_disable_phy:
phy_exit(phy);
-out:
+
return ret;
}
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
u32 hs, u32 rate, bool update_link_startup_timer)
{
- int ret = 0;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
u32 core_clk_period_in_ns;
* Aggregation logic.
*/
if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
- goto out;
+ return 0;
if (gear == 0) {
dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
- goto out_error;
+ return -EINVAL;
}
list_for_each_entry(clki, &hba->clk_list_head, list) {
}
if (ufs_qcom_cap_qunipro(host))
- goto out;
+ return 0;
core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(hs_fr_table_rA));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
} else if (rate == PA_HS_MODE_B) {
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(hs_fr_table_rB));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
} else {
dev_err(hba->dev, "%s: invalid rate = %d\n",
__func__, rate);
- goto out_error;
+ return -EINVAL;
}
break;
case SLOWAUTO_MODE:
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(pwm_fr_table));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
break;
case UNCHANGED:
default:
dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
- goto out_error;
+ return -EINVAL;
}
if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
*/
mb();
}
- goto out;
-out_error:
- ret = -EINVAL;
-out:
- return ret;
+ return 0;
}
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
0, true)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (ufs_qcom_cap_qunipro(host))
break;
}
-out:
return err;
}
struct ufs_dev_params ufs_qcom_cap;
int ret = 0;
+ if (!dev_req_params) {
+ pr_err("%s: incoming dev_req_params is NULL\n", __func__);
+ return -EINVAL;
+ }
+
switch (status) {
case PRE_CHANGE:
ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
if (ret) {
dev_err(hba->dev, "%s: failed to determine capabilities\n",
__func__);
- goto out;
+ return ret;
}
/* enable the device ref clock before changing to HS mode */
ret = -EINVAL;
break;
}
-out:
+
return ret;
}
err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
&pa_vs_config_reg1);
if (err)
- goto out;
+ return err;
/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
- err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
(pa_vs_config_reg1 | (1 << 12)));
-
-out:
- return err;
}
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
- err = -ENOMEM;
dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
- goto out;
+ return -ENOMEM;
}
/* Make a two way bind between the qcom host and the hba */
host->rcdev.owner = dev->driver->owner;
host->rcdev.nr_resets = 1;
err = devm_reset_controller_register(dev, &host->rcdev);
- if (err) {
+ if (err)
dev_warn(dev, "Failed to register reset controller\n");
- err = 0;
- }
if (!has_acpi_companion(dev)) {
host->generic_phy = devm_phy_get(dev, "ufsphy");
ufs_qcom_get_default_testbus_cfg(host);
err = ufs_qcom_testbus_config(host);
- if (err) {
+ if (err)
+ /* Failure is non-fatal */
dev_warn(dev, "%s: failed to configure the testbus %d\n",
__func__, err);
- err = 0;
- }
- goto out;
+ return 0;
out_variant_clear:
ufshcd_set_variant(hba, NULL);
-out:
+
return err;
}
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
if (err)
- goto out;
+ return err;
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
core_clk_ctrl_reg |= clk_cycles;
/* Clear CORE_CLK_DIV_EN */
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
- err = ufshcd_dme_set(hba,
+ return ufshcd_dme_set(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
core_clk_ctrl_reg);
-out:
- return err;
}
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
if (err || !dev_req_params) {
ufshcd_uic_hibern8_exit(hba);
- goto out;
+ return err;
}
ufs_qcom_cfg_timers(hba,
ufshcd_uic_hibern8_exit(hba);
}
-out:
- return err;
+ return 0;
}
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)