struct sdhci_tegra_autocal_offsets *autocal =
&tegra_host->autocal_offsets;
int err;
- err = device_property_read_u32(host->mmc->parent,
+ err = device_property_read_u32(mmc_dev(host->mmc),
"nvidia,pad-autocal-pull-up-offset-3v3",
&autocal->pull_up_3v3);
if (err)
autocal->pull_up_3v3 = 0;
- err = device_property_read_u32(host->mmc->parent,
+ err = device_property_read_u32(mmc_dev(host->mmc),
"nvidia,pad-autocal-pull-down-offset-3v3",
&autocal->pull_down_3v3);
if (err)
autocal->pull_down_3v3 = 0;
- err = device_property_read_u32(host->mmc->parent,
+ err = device_property_read_u32(mmc_dev(host->mmc),
"nvidia,pad-autocal-pull-up-offset-1v8",
&autocal->pull_up_1v8);
if (err)
autocal->pull_up_1v8 = 0;
- err = device_property_read_u32(host->mmc->parent,
+ err = device_property_read_u32(mmc_dev(host->mmc),
"nvidia,pad-autocal-pull-down-offset-1v8",
&autocal->pull_down_1v8);
if (err)
autocal->pull_down_1v8 = 0;
- err = device_property_read_u32(host->mmc->parent,
+ err = device_property_read_u32(mmc_dev(host->mmc),
"nvidia,pad-autocal-pull-up-offset-sdr104",
&autocal->pull_up_sdr104);
if (err)
autocal->pull_up_sdr104 = autocal->pull_up_1v8;
- err = device_property_read_u32(host->mmc->parent,
+ err = device_property_read_u32(mmc_dev(host->mmc),
"nvidia,pad-autocal-pull-down-offset-sdr104",
&autocal->pull_down_sdr104);
if (err)
autocal->pull_down_sdr104 = autocal->pull_down_1v8;
- err = device_property_read_u32(host->mmc->parent,
+ err = device_property_read_u32(mmc_dev(host->mmc),
"nvidia,pad-autocal-pull-up-offset-hs400",
&autocal->pull_up_hs400);
if (err)
autocal->pull_up_hs400 = autocal->pull_up_1v8;
- err = device_property_read_u32(host->mmc->parent,
+ err = device_property_read_u32(mmc_dev(host->mmc),
"nvidia,pad-autocal-pull-down-offset-hs400",
&autocal->pull_down_hs400);
if (err)
autocal->pull_down_hs400 = autocal->pull_down_1v8;

/*
 * The timeout-offset properties below only apply to SoCs that
 * implement pad control, so skip parsing them otherwise.
 */
if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
return;
- err = device_property_read_u32(host->mmc->parent,
+ err = device_property_read_u32(mmc_dev(host->mmc),
"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
&autocal->pull_up_3v3_timeout);
if (err) {
autocal->pull_up_3v3_timeout = 0;
}
- err = device_property_read_u32(host->mmc->parent,
+ err = device_property_read_u32(mmc_dev(host->mmc),
"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
&autocal->pull_down_3v3_timeout);
if (err) {
autocal->pull_down_3v3_timeout = 0;
}
- err = device_property_read_u32(host->mmc->parent,
+ err = device_property_read_u32(mmc_dev(host->mmc),
"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
&autocal->pull_up_1v8_timeout);
if (err) {
autocal->pull_up_1v8_timeout = 0;
}
- err = device_property_read_u32(host->mmc->parent,
+ err = device_property_read_u32(mmc_dev(host->mmc),
"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
&autocal->pull_down_1v8_timeout);
if (err) {
autocal->pull_down_1v8_timeout = 0;
}
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
int err;
- err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap",
+ err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
&tegra_host->default_tap);
if (err)
tegra_host->default_tap = 0;
- err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim",
+ err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
&tegra_host->default_trim);
if (err)
tegra_host->default_trim = 0;
- err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim",
+ err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
&tegra_host->dqs_trim);
if (err)
tegra_host->dqs_trim = 0x11;
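All of the host->mmc->parent to mmc_dev(host->mmc) conversions in this patch are behavior-neutral: mmc_dev() is the accessor the MMC core already provides for exactly this field, defined in include/linux/mmc/host.h as:

	#define mmc_dev(x)	((x)->parent)

Using the accessor instead of reaching into struct mmc_host keeps the drivers insulated from any future layout change.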
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
- if (device_property_read_bool(host->mmc->parent, "supports-cqe"))
+ if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
tegra_host->enable_hwcq = true;
else
tegra_host->enable_hwcq = false;
host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
- cq_host = devm_kzalloc(host->mmc->parent,
+ cq_host = devm_kzalloc(mmc_dev(host->mmc),
sizeof(*cq_host), GFP_KERNEL);
if (!cq_host) {
ret = -ENOMEM;
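Note that passing mmc_dev(host->mmc) as the devm owner keeps the allocation tied to the underlying platform device, exactly as before, so cq_host is still freed automatically on driver unbind. A minimal sketch of the lifetime rule, reusing the names from the hunk above:

	struct cqhci_host *cq_host;

	/* Freed automatically when the device behind mmc_dev() is
	 * unbound; no kfree() in the error or removal paths. */
	cq_host = devm_kzalloc(mmc_dev(host->mmc), sizeof(*cq_host),
			       GFP_KERNEL);
	if (!cq_host)
		return -ENOMEM;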
if (host->bus_on)
return;
host->bus_on = true;
- pm_runtime_get_noresume(host->mmc->parent);
+ pm_runtime_get_noresume(mmc_dev(host->mmc));
}
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
if (!host->bus_on)
return;
host->bus_on = false;
- pm_runtime_put_noidle(host->mmc->parent);
+ pm_runtime_put_noidle(mmc_dev(host->mmc));
}
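sdhci_runtime_pm_bus_on()/_off() deliberately use the no-side-effect runtime PM calls: pm_runtime_get_noresume() takes a usage-count reference without waking the device, and pm_runtime_put_noidle() drops it without triggering an idle check. A standalone sketch of the pairing, assuming the device is already active:

	struct device *dev = mmc_dev(host->mmc);

	pm_runtime_get_noresume(dev);	/* bus powered: forbid runtime suspend */
	/* ... the card bus stays powered here ... */
	pm_runtime_put_noidle(dev);	/* bus off: permit runtime suspend again */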
void sdhci_reset(struct sdhci_host *host, u8 mask)
}
}
/* Switch ownership to the DMA */
- dma_sync_single_for_device(host->mmc->parent,
+ dma_sync_single_for_device(mmc_dev(host->mmc),
host->bounce_addr,
host->bounce_buffer_size,
mmc_get_dma_dir(data));
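The sync calls implement the streaming-DMA ownership rules for the single long-lived bounce mapping: the CPU owns the buffer between dma_sync_single_for_cpu() and dma_sync_single_for_device(), and the hardware owns it otherwise. In sketch form, the write path around this hunk is:

	/* CPU owns the buffer: stage the scattered request into it. */
	sg_copy_to_buffer(data->sg, data->sg_len,
			  host->bounce_buffer, length);

	/* Hand ownership to the device before starting the transfer. */
	dma_sync_single_for_device(mmc_dev(host->mmc), host->bounce_addr,
				   host->bounce_buffer_size,
				   mmc_get_dma_dir(data));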
int ret = 0;
struct mmc_host *mmc = host->mmc;
- host->tx_chan = dma_request_chan(mmc->parent, "tx");
+ host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx");
if (IS_ERR(host->tx_chan)) {
ret = PTR_ERR(host->tx_chan);
if (ret != -EPROBE_DEFER)
pr_warn("Failed to request TX DMA channel.\n");
host->tx_chan = NULL;
return ret;
}
- host->rx_chan = dma_request_chan(mmc->parent, "rx");
+ host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx");
if (IS_ERR(host->rx_chan)) {
if (host->tx_chan) {
dma_release_channel(host->tx_chan);
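dma_request_chan() never returns NULL: it yields either a valid channel or an ERR_PTR()-encoded errno, which is why the error paths above must go through IS_ERR()/PTR_ERR() and reset the pointers before falling back. A hypothetical helper (not part of this patch) illustrating the convention:

	static struct dma_chan *request_chan_or_pio(struct device *dev,
						    const char *name)
	{
		struct dma_chan *chan = dma_request_chan(dev, name);

		if (IS_ERR(chan)) {
			if (PTR_ERR(chan) == -EPROBE_DEFER)
				return chan;	/* caller propagates the deferral */
			return NULL;		/* no DMA: fall back to PIO */
		}
		return chan;
	}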
unsigned long flags;
if (enable)
- pm_runtime_get_noresume(mmc->parent);
+ pm_runtime_get_noresume(mmc_dev(mmc));
spin_lock_irqsave(&host->lock, flags);
sdhci_enable_sdio_irq_nolock(host, enable);
spin_unlock_irqrestore(&host->lock, flags);
if (!enable)
- pm_runtime_put_noidle(mmc->parent);
+ pm_runtime_put_noidle(mmc_dev(mmc));
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
length = host->bounce_buffer_size;
}
dma_sync_single_for_cpu(
- host->mmc->parent,
+ mmc_dev(host->mmc),
host->bounce_addr,
host->bounce_buffer_size,
DMA_FROM_DEVICE);
} else {
/* No copying, just switch ownership */
dma_sync_single_for_cpu(
- host->mmc->parent,
+ mmc_dev(host->mmc),
host->bounce_addr,
host->bounce_buffer_size,
mmc_get_dma_dir(data));
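On the completion side the mirror-image handoff applies: ownership must return to the CPU before the bounce buffer contents can be unpacked. In the read branch above (the first dma_sync_single_for_cpu() call), the handoff is followed by copying the bounce buffer back into the request's scatterlist, roughly:

	/* CPU owns the buffer again: copy it back out (sketch). */
	sg_copy_from_buffer(data->sg, data->sg_len,
			    host->bounce_buffer, length);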
* speedups by the help of a bounce buffer to group scattered
* reads/writes together.
*/
- host->bounce_buffer = devm_kmalloc(mmc->parent,
+ host->bounce_buffer = devm_kmalloc(mmc_dev(mmc),
bounce_size,
GFP_KERNEL);
if (!host->bounce_buffer) {
return;
}
- host->bounce_addr = dma_map_single(mmc->parent,
+ host->bounce_addr = dma_map_single(mmc_dev(mmc),
host->bounce_buffer,
bounce_size,
DMA_BIDIRECTIONAL);
- ret = dma_mapping_error(mmc->parent, host->bounce_addr);
+ ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
if (ret)
/* Again fall back to max_segs == 1 */
return;
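The dma_mapping_error() check is mandatory: dma_map_single() can fail (IOMMU exhaustion, addressing constraints), and a failed handle must never be used or unmapped. The general pattern, as a sketch:

	dma_addr_t addr = dma_map_single(dev, buf, size, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, addr)) {
		/* addr is invalid: do not use or dma_unmap_single() it.
		 * sdhci simply degrades to mmc->max_segs == 1 here. */
		return;
	}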