struct hsmmc {
#ifndef CONFIG_OMAP34XX
- unsigned char res0[0x100];
+ unsigned int hl_rev;
+ unsigned int hl_hwinfo;
+ unsigned int hl_sysconfig;
+ unsigned char res0[0xf4];
#endif
unsigned char res1[0x10];
unsigned int sysconfig; /* 0x10 */
unsigned int ie; /* 0x134 */
unsigned char res4[0x8];
unsigned int capa; /* 0x140 */
+ unsigned char res5[0x10];
+ unsigned int admaes; /* 0x154 */
+ unsigned int admasal; /* 0x158 */
};
struct omap_hsmmc_plat {
/*
* OMAP HS MMC Bit definitions
*/
+#define MADMA_EN (0x1 << 0)
#define MMC_SOFTRESET (0x1 << 1)
#define RESETDONE (0x1 << 0)
#define NOOPENDRAIN (0x0 << 0)
#define WPP_ACTIVEHIGH (0x0 << 8)
#define RESERVED_MASK (0x3 << 9)
#define CTPL_MMC_SD (0x0 << 11)
+#define DMA_MASTER (0x1 << 20)
#define BLEN_512BYTESLEN (0x200 << 0)
#define NBLK_STPCNT (0x0 << 16)
#define DE_DISABLE (0x0 << 0)
#define SDBP_PWRON (0x1 << 8)
#define SDVS_1V8 (0x5 << 9)
#define SDVS_3V0 (0x6 << 9)
+#define DMA_SELECT (0x2 << 3)
#define ICE_MASK (0x1 << 0)
#define ICE_STOP (0x0 << 0)
#define ICS_MASK (0x1 << 1)
#define IE_DTO (0x01 << 20)
#define IE_DCRC (0x01 << 21)
#define IE_DEB (0x01 << 22)
+#define IE_ADMAE (0x01 << 25)
#define IE_CERR (0x01 << 28)
#define IE_BADA (0x01 << 29)
#include <config.h>
#include <common.h>
#include <malloc.h>
+#include <memalign.h>
#include <mmc.h>
#include <part.h>
#include <i2c.h>
int wp_gpio;
#endif
#endif
+ u8 controller_flags;
+#ifndef CONFIG_OMAP34XX
+ struct omap_hsmmc_adma_desc *adma_desc_table;
+ uint desc_slot;
+#endif
+};
+
+#ifndef CONFIG_OMAP34XX
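+/*
+ * 32-bit ADMA2 descriptor as laid out in the SD Host Controller spec:
+ * an attribute byte (valid/end/int plus the action bits), a 16-bit
+ * transfer length and a 32-bit buffer address.
+ */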
+struct omap_hsmmc_adma_desc {
+ u8 attr;
+ u8 reserved;
+ u16 len;
+ u32 addr;
};
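+/* Maximum data length per descriptor; must fit in the 16-bit len field */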
+#define ADMA_MAX_LEN 63488
+
+/* Descriptor table defines */
+#define ADMA_DESC_ATTR_VALID BIT(0)
+#define ADMA_DESC_ATTR_END BIT(1)
+#define ADMA_DESC_ATTR_INT BIT(2)
+#define ADMA_DESC_ATTR_ACT1 BIT(4)
+#define ADMA_DESC_ATTR_ACT2 BIT(5)
+
+#define ADMA_DESC_TRANSFER_DATA ADMA_DESC_ATTR_ACT2
+#define ADMA_DESC_LINK_DESC (ADMA_DESC_ATTR_ACT1 | ADMA_DESC_ATTR_ACT2)
+#endif
+
/* If we fail after 1 second wait, something is really bad */
#define MAX_RETRY_MS 1000
+/*
+ * DMA transfers can take a long time when a lot of data is transferred.
+ * The timeout must take the amount of data into account. Let's assume
+ * that the time will never exceed 333 ms per MB (in other words, that
+ * the bandwidth is always above 3 MB/s).
+ */
+#define DMA_TIMEOUT_PER_MB 333
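+/* Flag set when hl_hwinfo reports MADMA_EN (controller has an ADMA engine) */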
+#define OMAP_HSMMC_USE_ADMA BIT(2)
+
static int mmc_read_data(struct hsmmc *mmc_base, char *buf, unsigned int size);
static int mmc_write_data(struct hsmmc *mmc_base, const char *buf,
unsigned int size);
return -ETIMEDOUT;
}
}
+#ifndef CONFIG_OMAP34XX
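+ /* HL_HWINFO advertises whether a master ADMA engine is present */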
+ reg_val = readl(&mmc_base->hl_hwinfo);
+ if (reg_val & MADMA_EN)
+ priv->controller_flags |= OMAP_HSMMC_USE_ADMA;
+#endif
writel(DTW_1_BITMODE | SDBP_PWROFF | SDVS_3V0, &mmc_base->hctl);
writel(readl(&mmc_base->capa) | VS30_3V0SUP | VS18_1V8SUP,
&mmc_base->capa);
writel(readl(&mmc_base->hctl) | SDBP_PWRON, &mmc_base->hctl);
writel(IE_BADA | IE_CERR | IE_DEB | IE_DCRC | IE_DTO | IE_CIE |
- IE_CEB | IE_CCRC | IE_CTO | IE_BRR | IE_BWR | IE_TC | IE_CC,
- &mmc_base->ie);
+ IE_CEB | IE_CCRC | IE_ADMAE | IE_CTO | IE_BRR | IE_BWR | IE_TC |
+ IE_CC, &mmc_base->ie);
mmc_init_stream(mmc_base);
}
}
}
+
+#ifndef CONFIG_OMAP34XX
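+/*
+ * Fill the next free slot of the descriptor table. Every descriptor is
+ * marked valid; only the last one carries the END attribute, which tells
+ * the ADMA engine to stop after that transfer.
+ */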
+static void omap_hsmmc_adma_desc(struct mmc *mmc, char *buf, u16 len, bool end)
+{
+ struct omap_hsmmc_data *priv = omap_hsmmc_get_data(mmc);
+ struct omap_hsmmc_adma_desc *desc;
+ u8 attr;
+
+ desc = &priv->adma_desc_table[priv->desc_slot];
+
+ attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA;
+ if (!end)
+ priv->desc_slot++;
+ else
+ attr |= ADMA_DESC_ATTR_END;
+
+ desc->len = len;
+ desc->addr = (u32)buf;
+ desc->reserved = 0;
+ desc->attr = attr;
+}
+
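+/*
+ * Split the request into ADMA_MAX_LEN chunks, build one descriptor per
+ * chunk and flush the finished table so the DMA engine reads up-to-date
+ * descriptors from memory.
+ */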
+static void omap_hsmmc_prepare_adma_table(struct mmc *mmc,
+ struct mmc_data *data)
+{
+ uint total_len = data->blocksize * data->blocks;
+ uint desc_count = DIV_ROUND_UP(total_len, ADMA_MAX_LEN);
+ struct omap_hsmmc_data *priv = omap_hsmmc_get_data(mmc);
+ int i = desc_count;
+ char *buf;
+
+ priv->desc_slot = 0;
+ priv->adma_desc_table = (struct omap_hsmmc_adma_desc *)
+ memalign(ARCH_DMA_MINALIGN, desc_count *
+ sizeof(struct omap_hsmmc_adma_desc));
+
+ if (data->flags & MMC_DATA_READ)
+ buf = data->dest;
+ else
+ buf = (char *)data->src;
+
+ while (--i) {
+ omap_hsmmc_adma_desc(mmc, buf, ADMA_MAX_LEN, false);
+ buf += ADMA_MAX_LEN;
+ total_len -= ADMA_MAX_LEN;
+ }
+
+ omap_hsmmc_adma_desc(mmc, buf, total_len, true);
+
+ flush_dcache_range((long)priv->adma_desc_table,
+ (long)priv->adma_desc_table +
+ ROUND(desc_count *
+ sizeof(struct omap_hsmmc_adma_desc),
+ ARCH_DMA_MINALIGN));
+}
+
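+/*
+ * Program the controller for an ADMA2 transfer: build the descriptor
+ * table, select ADMA2 in HCTL, enable the DMA master in CON, point
+ * ADMASAL at the table and flush the data buffer out of the cache.
+ */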
+static void omap_hsmmc_prepare_data(struct mmc *mmc, struct mmc_data *data)
+{
+ struct hsmmc *mmc_base;
+ struct omap_hsmmc_data *priv = omap_hsmmc_get_data(mmc);
+ u32 val;
+ char *buf;
+
+ mmc_base = priv->base_addr;
+ omap_hsmmc_prepare_adma_table(mmc, data);
+
+ if (data->flags & MMC_DATA_READ)
+ buf = data->dest;
+ else
+ buf = (char *)data->src;
+
+ val = readl(&mmc_base->hctl);
+ val |= DMA_SELECT;
+ writel(val, &mmc_base->hctl);
+
+ val = readl(&mmc_base->con);
+ val |= DMA_MASTER;
+ writel(val, &mmc_base->con);
+
+ writel((u32)priv->adma_desc_table, &mmc_base->admasal);
+
+ flush_dcache_range((u32)buf,
+ (u32)buf +
+ ROUND(data->blocksize * data->blocks,
+ ARCH_DMA_MINALIGN));
+}
+
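+/* Undo the DMA setup in CON/HCTL and release the descriptor table */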
+static void omap_hsmmc_dma_cleanup(struct mmc *mmc)
+{
+ struct hsmmc *mmc_base;
+ struct omap_hsmmc_data *priv = omap_hsmmc_get_data(mmc);
+ u32 val;
+
+ mmc_base = priv->base_addr;
+
+ val = readl(&mmc_base->con);
+ val &= ~DMA_MASTER;
+ writel(val, &mmc_base->con);
+
+ val = readl(&mmc_base->hctl);
+ val &= ~DMA_SELECT;
+ writel(val, &mmc_base->hctl);
+
+ /* The table came from memalign(), so release it with free() */
+ free(priv->adma_desc_table);
+}
+#else
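+/* OMAP3-era controllers have no ADMA; stub the helpers out */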
+#define omap_hsmmc_adma_desc
+#define omap_hsmmc_prepare_adma_table
+#define omap_hsmmc_prepare_data
+#define omap_hsmmc_dma_cleanup
+#endif
+
#if !CONFIG_IS_ENABLED(DM_MMC)
static int omap_hsmmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
struct mmc_data *data)
struct mmc_data *data)
{
struct omap_hsmmc_data *priv = dev_get_priv(dev);
+#ifndef CONFIG_OMAP34XX
+ struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev);
+ struct mmc *mmc = upriv->mmc;
+#endif
#endif
struct hsmmc *mmc_base;
unsigned int flags, mmc_stat;
flags |= (DP_DATA | DDIR_READ);
else
flags |= (DP_DATA | DDIR_WRITE);
+
+#ifndef CONFIG_OMAP34XX
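+ /* Tuning commands keep using PIO; other data transfers go through ADMA */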
+ if ((priv->controller_flags & OMAP_HSMMC_USE_ADMA) &&
+ !mmc_is_tuning_cmd(cmd->cmdidx)) {
+ omap_hsmmc_prepare_data(mmc, data);
+ flags |= DE_ENABLE;
+ }
+#endif
}
writel(cmd->cmdarg, &mmc_base->arg);
start = get_timer(0);
do {
mmc_stat = readl(&mmc_base->stat);
- if (get_timer(0) - start > MAX_RETRY_MS) {
+ if (get_timer(start) > MAX_RETRY_MS) {
printf("%s : timeout: No status update\n", __func__);
return -ETIMEDOUT;
}
}
}
+#ifndef CONFIG_OMAP34XX
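+ /*
+ * For an ADMA transfer, wait for transfer complete here. The timeout
+ * scales with the transfer size (DMA_TIMEOUT_PER_MB) and never drops
+ * below MAX_RETRY_MS.
+ */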
+ if ((priv->controller_flags & OMAP_HSMMC_USE_ADMA) && data &&
+ !mmc_is_tuning_cmd(cmd->cmdidx)) {
+ u32 sz_mb, timeout;
+
+ if (mmc_stat & IE_ADMAE) {
+ omap_hsmmc_dma_cleanup(mmc);
+ return -EIO;
+ }
+
+ sz_mb = DIV_ROUND_UP(data->blocksize * data->blocks, 1 << 20);
+ timeout = sz_mb * DMA_TIMEOUT_PER_MB;
+ if (timeout < MAX_RETRY_MS)
+ timeout = MAX_RETRY_MS;
+
+ start = get_timer(0);
+ do {
+ mmc_stat = readl(&mmc_base->stat);
+ if (mmc_stat & TC_MASK) {
+ writel(readl(&mmc_base->stat) | TC_MASK,
+ &mmc_base->stat);
+ break;
+ }
+ if (get_timer(start) > timeout) {
+ printf("%s : DMA timeout: No status update\n",
+ __func__);
+ return -ETIMEDOUT;
+ }
+ } while (1);
+
+ omap_hsmmc_dma_cleanup(mmc);
+ return 0;
+ }
+#endif
+
if (data && (data->flags & MMC_DATA_READ)) {
mmc_read_data(mmc_base, data->dest,
data->blocksize * data->blocks);