aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc/host
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/mmc/host')
-rw-r--r--drivers/mmc/host/alcor.c6
-rw-r--r--drivers/mmc/host/cavium-octeon.c10
-rw-r--r--drivers/mmc/host/cavium-thunderx.c148
-rw-r--r--drivers/mmc/host/cavium.c1023
-rw-r--r--drivers/mmc/host/cavium.h137
-rw-r--r--drivers/mmc/host/cqhci.c21
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c14
-rw-r--r--drivers/mmc/host/mmc_spi.c11
-rw-r--r--drivers/mmc/host/mmci.c34
-rw-r--r--drivers/mmc/host/mmci.h8
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c3
-rw-r--r--drivers/mmc/host/mtk-sd.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c30
-rw-r--r--drivers/mmc/host/pxamci.c8
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c31
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c18
-rw-r--r--drivers/mmc/host/sdhci-acpi.c10
-rw-r--r--drivers/mmc/host/sdhci-cadence.c131
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c5
-rw-r--r--drivers/mmc/host/sdhci-msm.c40
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c19
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c58
-rw-r--r--drivers/mmc/host/sdhci-omap.c3
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c67
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c6
-rw-r--r--drivers/mmc/host/sdhci-pci.h3
-rw-r--r--drivers/mmc/host/sdhci-tegra.c5
-rw-r--r--drivers/mmc/host/sdhci-xenon.c10
-rw-r--r--drivers/mmc/host/sdhci.c64
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mmc/host/sdhci_am654.c18
-rw-r--r--drivers/mmc/host/sh_mmcif.c6
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c8
-rw-r--r--drivers/mmc/host/uniphier-sd.c12
-rw-r--r--drivers/mmc/host/via-sdmmc.c7
35 files changed, 1654 insertions, 326 deletions
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index e481535cba2b..d76ecd9bd452 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -1104,7 +1104,7 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Failed to get irq for data line\n");
- return ret;
+ goto free_host;
}
mutex_init(&host->cmd_mutex);
@@ -1116,6 +1116,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, host);
mmc_add_host(mmc);
return 0;
+
+free_host:
+ mmc_free_host(mmc);
+ return ret;
}
static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 22aded1065ae..302c052b8d93 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -247,8 +247,8 @@ static int octeon_mmc_probe(struct platform_device *pdev)
/* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */
for (i = 1; i <= 4; i++) {
ret = devm_request_irq(&pdev->dev, mmc_irq[i],
- cvm_mmc_interrupt,
- 0, cvm_mmc_irq_names[i], host);
+ cvm_mmc_interrupt, IRQF_NO_THREAD,
+ cvm_mmc_irq_names[i], host);
if (ret < 0) {
dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
mmc_irq[i]);
@@ -257,8 +257,8 @@ static int octeon_mmc_probe(struct platform_device *pdev)
}
} else {
ret = devm_request_irq(&pdev->dev, mmc_irq[0],
- cvm_mmc_interrupt, 0, KBUILD_MODNAME,
- host);
+ cvm_mmc_interrupt, IRQF_NO_THREAD,
+ KBUILD_MODNAME, host);
if (ret < 0) {
dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
mmc_irq[0]);
@@ -277,7 +277,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
i = 0;
- for_each_child_of_node(node, cn) {
+ for_each_available_child_of_node(node, cn) {
host->slot_pdev[i] =
of_platform_device_create(cn, NULL, &pdev->dev);
if (!host->slot_pdev[i]) {
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index eee08d81b242..7fbf33e34217 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -15,6 +15,8 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/bitfield.h>
#include "cavium.h"
static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
@@ -31,6 +33,8 @@ static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
writeq(val, host->base + MIO_EMM_INT(host));
writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
+ writeq(MIO_EMM_DMA_INT_DMA,
+ host->dma_base + MIO_EMM_DMA_INT(host));
}
static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
@@ -45,14 +49,125 @@ static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
/* register interrupts */
for (i = 0; i < nvec; i++) {
ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
- cvm_mmc_interrupt,
- 0, cvm_mmc_irq_names[i], host);
+ cvm_mmc_interrupt, IRQF_NO_THREAD,
+ cvm_mmc_irq_names[i], host);
if (ret)
return ret;
}
return 0;
}
+/* calibration evaluates the per tap delay */
+static void thunder_calibrate_mmc(struct cvm_mmc_host *host)
+{
+ u32 retries = 10;
+ u32 delay = 4;
+ unsigned int ps;
+ const char *how = "default";
+
+ if (is_mmc_8xxx(host))
+ return;
+
+ /* set _DEBUG[CLK_ON]=1 as workaround for clock issue */
+ if (is_mmc_otx2_A0(host) || is_mmc_95xx(host))
+ writeq(1, host->base + MIO_EMM_DEBUG(host));
+
+ if (is_mmc_otx2_A0(host)) {
+ /*
+ * Operation of up to 100 MHz may be achieved by skipping the
+ * steps that establish the tap delays and instead assuming
+ * that MIO_EMM_TAP[DELAY] returns 0x4 indicating 78 pS/tap.
+ */
+ } else {
+ u64 tap;
+ u64 emm_cfg = readq(host->base + MIO_EMM_CFG(host));
+ u64 tcfg;
+ u64 emm_io_ctl;
+ u64 emm_switch;
+ u64 emm_wdog;
+ u64 emm_sts_mask;
+ u64 emm_debug;
+ u64 emm_timing;
+ u64 emm_rca;
+
+ /*
+ * MIO_EMM_CFG[BUS_ENA] must be zero for calibration,
+ * but that resets whole host, so save state.
+ */
+ emm_io_ctl = readq(host->base + MIO_EMM_IO_CTL(host));
+ emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
+ emm_wdog = readq(host->base + MIO_EMM_WDOG(host));
+ emm_sts_mask =
+ readq(host->base + MIO_EMM_STS_MASK(host));
+ emm_debug = readq(host->base + MIO_EMM_DEBUG(host));
+ emm_timing = readq(host->base + MIO_EMM_TIMING(host));
+ emm_rca = readq(host->base + MIO_EMM_RCA(host));
+
+ /* reset controller */
+ tcfg = emm_cfg;
+ tcfg &= ~MIO_EMM_CFG_BUS_ENA;
+ writeq(tcfg, host->base + MIO_EMM_CFG(host));
+ udelay(1);
+
+ /* restart with phantom slot 3 */
+ tcfg |= FIELD_PREP(MIO_EMM_CFG_BUS_ENA, 1ull << 3);
+ writeq(tcfg, host->base + MIO_EMM_CFG(host));
+ mdelay(1);
+
+ /* Start calibration */
+ writeq(0, host->base + MIO_EMM_CALB(host));
+ udelay(5);
+ writeq(START_CALIBRATION, host->base + MIO_EMM_CALB(host));
+ udelay(5);
+
+ do {
+ /* wait for approximately 300 coprocessor clock */
+ udelay(5);
+ tap = readq(host->base + MIO_EMM_TAP(host));
+ } while (!tap && retries--);
+
+ /* leave calibration mode */
+ writeq(0, host->base + MIO_EMM_CALB(host));
+ udelay(5);
+
+ if (retries <= 0 || !tap) {
+ how = "fallback";
+ } else {
+ /* calculate the per-tap delay */
+ delay = tap & MIO_EMM_TAP_DELAY;
+ how = "calibrated";
+ }
+
+ /* restore old state */
+ writeq(emm_cfg, host->base + MIO_EMM_CFG(host));
+ mdelay(1);
+ writeq(emm_rca, host->base + MIO_EMM_RCA(host));
+ writeq(emm_timing, host->base + MIO_EMM_TIMING(host));
+ writeq(emm_debug, host->base + MIO_EMM_DEBUG(host));
+ writeq(emm_sts_mask,
+ host->base + MIO_EMM_STS_MASK(host));
+ writeq(emm_wdog, host->base + MIO_EMM_WDOG(host));
+ writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
+ writeq(emm_io_ctl, host->base + MIO_EMM_IO_CTL(host));
+ mdelay(1);
+
+ }
+
+ /*
+ * Scale measured/guessed calibration value to pS:
+ * The delay value should be multiplied by 10 ns(or 10000 ps)
+ * and then divided by no of taps to determine the estimated
+ * delay in pico second. The nominal value is 125 ps per tap.
+ */
+ ps = (delay * PS_10000) / TOTAL_NO_OF_TAPS;
+ if (host->per_tap_delay != ps) {
+ dev_info(host->dev, "%s delay:%d per_tap_delay:%dpS\n",
+ how, delay, ps);
+ host->per_tap_delay = ps;
+ host->delay_logged = 0;
+ }
+}
+
static int thunder_mmc_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -81,6 +196,7 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
/* On ThunderX these are identical */
host->dma_base = host->base;
+ host->pdev = pdev;
host->reg_off = 0x2000;
host->reg_off_dma = 0x160;
@@ -107,24 +223,32 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
host->need_irq_handler_lock = true;
host->last_slot = -1;
- ret = dma_set_mask(dev, DMA_BIT_MASK(48));
if (ret)
goto error;
/*
* Clear out any pending interrupts that may be left over from
* bootloader. Writing 1 to the bits clears them.
+ * Clear DMA FIFO after IRQ disable, then stub any dangling events
*/
- writeq(127, host->base + MIO_EMM_INT_EN(host));
- writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
- /* Clear DMA FIFO */
- writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));
+ writeq(~0, host->base + MIO_EMM_INT(host));
+ writeq(~0, host->dma_base + MIO_EMM_DMA_INT_ENA_W1C(host));
+ writeq(~0, host->base + MIO_EMM_INT_EN_CLR(host));
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+ writeq(~0, host->dma_base + MIO_EMM_DMA_INT(host));
ret = thunder_mmc_register_interrupts(host, pdev);
if (ret)
goto error;
- for_each_child_of_node(node, child_node) {
+ /* Run the calibration to calculate per tap delay that would be
+ * used to evaluate values. These values would be programmed in
+ * MIO_EMM_TIMING.
+ */
+ thunder_calibrate_mmc(host);
+
+ for_each_available_child_of_node(node, child_node) {
/*
* mmc_of_parse and devm* require one device per slot.
* Create a dummy device per slot and set the node pointer to
@@ -137,12 +261,15 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
if (!host->slot_pdev[i])
continue;
+ dev_info(dev, "Probing slot %d\n", i);
+
ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
if (ret)
goto error;
}
i++;
}
+
dev_info(dev, "probed\n");
return 0;
@@ -171,8 +298,11 @@ static void thunder_mmc_remove(struct pci_dev *pdev)
cvm_mmc_of_slot_remove(host->slot[i]);
dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
- dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
+ dma_cfg |= MIO_EMM_DMA_CFG_CLR;
writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
+ do {
+ dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
+ } while (dma_cfg & MIO_EMM_DMA_CFG_EN);
clk_disable_unprepare(host->clk);
}
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index 89deb451e0ac..5005efd113ee 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -25,6 +25,8 @@
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>
+#include <linux/iommu.h>
+#include <linux/swiotlb.h>
#include "cavium.h"
@@ -38,6 +40,8 @@ const char *cvm_mmc_irq_names[] = {
"MMC Switch Error",
"MMC DMA int Fifo",
"MMC DMA int",
+ "MMC NCB Fault",
+ "MMC RAS",
};
/*
@@ -71,7 +75,7 @@ static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
{0, 1}, /* CMD16 */
{1, 1}, /* CMD17 */
{1, 1}, /* CMD18 */
- {3, 1}, /* CMD19 */
+ {2, 1}, /* CMD19 */
{2, 1}, /* CMD20 */
{0, 0}, /* CMD21 */
{0, 0}, /* CMD22 */
@@ -118,6 +122,156 @@ static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
{0, 0} /* CMD63 */
};
+static int tapdance;
+module_param(tapdance, int, 0644);
+MODULE_PARM_DESC(tapdance, "adjust bus-timing: (0=mid-eye, positive=Nth_fastest_tap)");
+
+static int clk_scale = 100;
+module_param(clk_scale, int, 0644);
+MODULE_PARM_DESC(clk_scale, "percent scale data_/cmd_out taps (default 100)");
+
+static bool fixed_timing;
+module_param(fixed_timing, bool, 0444);
+MODULE_PARM_DESC(fixed_timing, "use fixed data_/cmd_out taps");
+
+static bool ddr_cmd_taps;
+module_param(ddr_cmd_taps, bool, 0644);
+MODULE_PARM_DESC(ddr_cmd_taps, "reduce cmd_out_taps in DDR modes, as before");
+
+static bool cvm_is_mmc_timing_ddr(struct cvm_mmc_slot *slot)
+{
+ if ((slot->mmc->ios.timing == MMC_TIMING_UHS_DDR50) ||
+ (slot->mmc->ios.timing == MMC_TIMING_MMC_DDR52) ||
+ (slot->mmc->ios.timing == MMC_TIMING_MMC_HS400))
+ return true;
+ else
+ return false;
+}
+
+static void cvm_mmc_set_timing(struct cvm_mmc_slot *slot)
+{
+ if (!is_mmc_otx2(slot->host))
+ return;
+
+ writeq(slot->taps, slot->host->base + MIO_EMM_TIMING(slot->host));
+}
+
+static int tout(struct cvm_mmc_slot *slot, int ps, int hint)
+{
+ struct cvm_mmc_host *host = slot->host;
+ struct mmc_host *mmc = slot->mmc;
+ int tap_ps = host->per_tap_delay;
+ int timing = mmc->ios.timing;
+ static int old_scale;
+ int taps;
+
+ if (fixed_timing)
+ return hint;
+
+ if (!hint)
+ hint = 63;
+
+ if (!tap_ps)
+ return hint;
+
+ taps = min((int)(ps * clk_scale) / (tap_ps * 100), 63);
+
+ /* when modparam is adjusted, re-announce timing */
+ if (old_scale != clk_scale) {
+ host->delay_logged = 0;
+ old_scale = clk_scale;
+ }
+
+ if (!test_and_set_bit(timing,
+ &host->delay_logged))
+ dev_info(host->dev, "mmc%d.ios_timing:%d %dpS hint:%d taps:%d\n",
+ mmc->index, timing, ps, hint, taps);
+
+ return taps;
+}
+
+static int cvm_mmc_configure_delay(struct cvm_mmc_slot *slot)
+{
+ struct cvm_mmc_host *host = slot->host;
+ struct mmc_host *mmc = slot->mmc;
+
+ pr_debug("slot%d.configure_delay\n", slot->bus_id);
+
+ if (is_mmc_8xxx(host)) {
+ /* MIO_EMM_SAMPLE is till T83XX */
+ u64 emm_sample =
+ FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
+ FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->data_cnt);
+ writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));
+ } else {
+ int half = MAX_NO_OF_TAPS / 2;
+ int cin = FIELD_GET(MIO_EMM_TIMING_CMD_IN, slot->taps);
+ int din = FIELD_GET(MIO_EMM_TIMING_DATA_IN, slot->taps);
+ int cout, dout;
+
+ if (!slot->taps)
+ cin = din = half;
+ /*
+ * EMM_CMD hold time from rising edge of EMMC_CLK.
+ * Typically 3.0 ns at frequencies < 26 MHz.
+ * Typically 3.0 ns at frequencies <= 52 MHz SDR.
+ * Typically 2.5 ns at frequencies <= 52 MHz DDR.
+ * Typically 0.8 ns at frequencies > 52 MHz SDR.
+ * Typically 0.4 ns at frequencies > 52 MHz DDR.
+ */
+ switch (mmc->ios.timing) {
+ case MMC_TIMING_LEGACY:
+ default:
+ if (mmc->card && mmc_card_mmc(mmc->card))
+ cout = tout(slot, 5000, 39);
+ else
+ cout = tout(slot, 8000, 63);
+ break;
+ case MMC_TIMING_UHS_SDR12:
+ cout = tout(slot, 3000, 39);
+ break;
+ case MMC_TIMING_MMC_HS:
+ cout = tout(slot, 2500, 32);
+ break;
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_SDR50:
+ cout = tout(slot, 2000, 26);
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ cout = tout(slot, 1500, 20);
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_MMC_HS400:
+ cout = tout(slot, 800, 10);
+ break;
+ }
+
+ if (!is_mmc_95xx(host)) {
+ if (!cvm_is_mmc_timing_ddr(slot))
+ dout = cout;
+ else if (ddr_cmd_taps)
+ cout = dout = cout / 2;
+ else
+ dout = cout / 2;
+ } else
+ dout = tout(slot, 800, 10);
+
+ slot->taps =
+ FIELD_PREP(MIO_EMM_TIMING_CMD_IN, cin) |
+ FIELD_PREP(MIO_EMM_TIMING_CMD_OUT, cout) |
+ FIELD_PREP(MIO_EMM_TIMING_DATA_IN, din) |
+ FIELD_PREP(MIO_EMM_TIMING_DATA_OUT, dout);
+
+ pr_debug("slot%d.taps %llx\n", slot->bus_id, slot->taps);
+ cvm_mmc_set_timing(slot);
+ }
+
+ return 0;
+}
+
static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
struct cvm_mmc_cr_type *cr;
@@ -175,14 +329,14 @@ static void check_switch_errors(struct cvm_mmc_host *host)
dev_err(host->dev, "Switch bus width error\n");
}
-static void clear_bus_id(u64 *reg)
+static inline void clear_bus_id(u64 *reg)
{
u64 bus_id_mask = GENMASK_ULL(61, 60);
*reg &= ~bus_id_mask;
}
-static void set_bus_id(u64 *reg, int bus_id)
+static inline void set_bus_id(u64 *reg, int bus_id)
{
clear_bus_id(reg);
*reg |= FIELD_PREP(GENMASK(61, 60), bus_id);
@@ -193,25 +347,69 @@ static int get_bus_id(u64 reg)
return FIELD_GET(GENMASK_ULL(61, 60), reg);
}
-/*
- * We never set the switch_exe bit since that would interfere
- * with the commands send by the MMC core.
- */
-static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
+/* save old slot details, switch power */
+static bool pre_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
- int retries = 100;
- u64 rsp_sts;
- int bus_id;
+ int bus_id = get_bus_id(emm_switch);
+ struct cvm_mmc_slot *slot = host->slot[bus_id];
+ struct cvm_mmc_slot *old_slot;
+ bool same_vqmmc = false;
- /*
- * Modes setting only taken from slot 0. Work around that hardware
- * issue by first switching to slot 0.
+ if (host->last_slot == bus_id)
+ return false;
+
+ /* when VQMMC is switched, tri-state CMDn over any slot change
+ * to avoid transient states on D0-7 or CLK from level-shifters
*/
- bus_id = get_bus_id(emm_switch);
- clear_bus_id(&emm_switch);
- writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
+ if (host->use_vqmmc) {
+ writeq(1ull << 3, host->base + MIO_EMM_CFG(host));
+ udelay(10);
+ }
+
+ if (host->last_slot >= 0 && host->slot[host->last_slot]) {
+ old_slot = host->slot[host->last_slot];
+ old_slot->cached_switch =
+ readq(host->base + MIO_EMM_SWITCH(host));
+ old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
+
+ same_vqmmc = (slot->mmc->supply.vqmmc ==
+ old_slot->mmc->supply.vqmmc);
+ if (!same_vqmmc && !IS_ERR_OR_NULL(old_slot->mmc->supply.vqmmc))
+ regulator_disable(old_slot->mmc->supply.vqmmc);
+ }
+
+ if (!same_vqmmc && !IS_ERR_OR_NULL(slot->mmc->supply.vqmmc)) {
+ int e = regulator_enable(slot->mmc->supply.vqmmc);
+
+ if (e)
+ dev_err(host->dev, "mmc-slot@%d.vqmmc err %d\n",
+ bus_id, e);
+ }
+
+ host->last_slot = slot->bus_id;
+
+ return true;
+}
+
+static void post_switch(struct cvm_mmc_host *host, u64 emm_switch)
+{
+ int bus_id = get_bus_id(emm_switch);
+ struct cvm_mmc_slot *slot = host->slot[bus_id];
+
+ if (host->use_vqmmc) {
+ /* enable new CMDn */
+ writeq(1ull << bus_id, host->base + MIO_EMM_CFG(host));
+ udelay(10);
+ }
+
+ writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
+}
+
+static inline void mode_switch(struct cvm_mmc_host *host, u64 emm_switch)
+{
+ u64 rsp_sts;
+ int retries = 100;
- set_bus_id(&emm_switch, bus_id);
writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
/* wait for the switch to finish */
@@ -221,15 +419,49 @@ static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
break;
udelay(10);
} while (--retries);
+}
+
+/*
+ * We never set the switch_exe bit since that would interfere
+ * with the commands send by the MMC core.
+ */
+static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
+{
+ int bus_id = get_bus_id(emm_switch);
+ struct cvm_mmc_slot *slot = host->slot[bus_id];
+ bool slot_changed = pre_switch(host, emm_switch);
+
+ /*
+ * Modes setting only taken from slot 0. Work around that hardware
+ * issue by first switching to slot 0.
+ */
+ if (bus_id) {
+ u64 switch0 = emm_switch;
+
+ clear_bus_id(&switch0);
+ mode_switch(host, switch0);
+ }
+
+ mode_switch(host, emm_switch);
check_switch_errors(host);
+ if (slot_changed)
+ post_switch(host, emm_switch);
+ slot->cached_switch = emm_switch;
+ if (emm_switch & MIO_EMM_SWITCH_CLK)
+ slot->cmd6_pending = false;
}
+/* need to change hardware state to match software requirements? */
static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
/* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
- u64 match = 0x3001070fffffffffull;
+ /* For 9xxx add HS200_TIMING and HS400_TIMING */
+ u64 match = (is_mmc_otx2(slot->host)) ?
+ 0x3007070fffffffffull : 0x3001070fffffffffull;
+ if (!slot->host->powered)
+ return true;
return (slot->cached_switch & match) != (new_val & match);
}
@@ -247,58 +479,62 @@ static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}
+static void emmc_io_drive_setup(struct cvm_mmc_slot *slot)
+{
+ u64 ioctl_cfg;
+ struct cvm_mmc_host *host = slot->host;
+
+ /* Setup drive and slew only for 9x */
+ if (is_mmc_otx2(host)) {
+ if ((slot->drive < 0) || (slot->slew < 0))
+ return;
+ /* Setup the emmc interface current drive
+ * strength & clk slew rate.
+ */
+ ioctl_cfg = FIELD_PREP(MIO_EMM_IO_CTL_DRIVE, slot->drive) |
+ FIELD_PREP(MIO_EMM_IO_CTL_SLEW, slot->slew);
+ writeq(ioctl_cfg, host->base + MIO_EMM_IO_CTL(host));
+ }
+}
+
static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
struct cvm_mmc_host *host = slot->host;
u64 emm_switch, wdog;
- emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
- emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
- MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
+ emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
+ emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERRS);
set_bus_id(&emm_switch, slot->bus_id);
- wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
- do_switch(slot->host, emm_switch);
-
- slot->cached_switch = emm_switch;
+ wdog = readq(host->base + MIO_EMM_WDOG(host));
+ do_switch(host, emm_switch);
+ host->powered = true;
msleep(20);
- writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
+ writeq(wdog, host->base + MIO_EMM_WDOG(host));
}
/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
struct cvm_mmc_host *host = slot->host;
- struct cvm_mmc_slot *old_slot;
- u64 emm_sample, emm_switch;
if (slot->bus_id == host->last_slot)
return;
- if (host->last_slot >= 0 && host->slot[host->last_slot]) {
- old_slot = host->slot[host->last_slot];
- old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
- old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
- }
-
- writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
- emm_switch = slot->cached_switch;
- set_bus_id(&emm_switch, slot->bus_id);
- do_switch(host, emm_switch);
-
- emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
- FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
- writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));
+ do_switch(host, slot->cached_switch);
+ host->powered = true;
- host->last_slot = slot->bus_id;
+ emmc_io_drive_setup(slot);
+ cvm_mmc_configure_delay(slot);
}
-static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
+static void do_read(struct cvm_mmc_slot *slot, struct mmc_request *req,
u64 dbuf)
{
- struct sg_mapping_iter *smi = &host->smi;
+ struct cvm_mmc_host *host = slot->host;
+ struct sg_mapping_iter *smi = &slot->smi;
int data_len = req->data->blocks * req->data->blksz;
int bytes_xfered, shift = -1;
u64 dat = 0;
@@ -365,7 +601,7 @@ static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
}
}
-static int get_dma_dir(struct mmc_data *data)
+static inline int get_dma_dir(struct mmc_data *data)
{
return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
@@ -374,6 +610,9 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
+
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
return 1;
}
@@ -382,6 +621,7 @@ static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
u64 fifo_cfg;
int count;
+ void __iomem *dma_intp = host->dma_base + MIO_EMM_DMA_INT(host);
/* Check if there are any pending requests left */
fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
@@ -392,8 +632,16 @@ static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
- /* Clear and disable FIFO */
- writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+
+ /* on read, wait for internal buffer to flush out to mem */
+ if (get_dma_dir(data) == DMA_FROM_DEVICE) {
+ while (!(readq(dma_intp) & MIO_EMM_DMA_INT_DMA))
+ udelay(10);
+ writeq(MIO_EMM_DMA_INT_DMA, dma_intp);
+ }
+
dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
return 1;
}
@@ -415,7 +663,8 @@ static int check_status(u64 rsp_sts)
if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
return -ETIMEDOUT;
- if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
+ if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR ||
+ rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
return -EIO;
return 0;
}
@@ -435,16 +684,24 @@ static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
struct cvm_mmc_host *host = dev_id;
- struct mmc_request *req;
+ struct mmc_request *req = NULL;
+ struct cvm_mmc_slot *slot = NULL;
unsigned long flags = 0;
u64 emm_int, rsp_sts;
bool host_done;
+ int bus_id;
if (host->need_irq_handler_lock)
spin_lock_irqsave(&host->irq_handler_lock, flags);
else
__acquire(&host->irq_handler_lock);
+ rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
+ bus_id = get_bus_id(rsp_sts);
+ slot = host->slot[bus_id];
+ if (slot)
+ req = slot->current_req;
+
/* Clear interrupt bits (write 1 clears ). */
emm_int = readq(host->base + MIO_EMM_INT(host));
writeq(emm_int, host->base + MIO_EMM_INT(host));
@@ -452,25 +709,32 @@ irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
if (emm_int & MIO_EMM_INT_SWITCH_ERR)
check_switch_errors(host);
- req = host->current_req;
if (!req)
goto out;
- rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
+ /*
+ * dma_pend means DMA has stalled with CRC errs.
+ * start teardown, get irq on completion, mmc stack retries.
+ */
+ if ((rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) && slot->dma_active) {
+ cleanup_dma(host, rsp_sts);
+ goto out;
+ }
+
/*
* dma_val set means DMA is still in progress. Don't touch
* the request and wait for the interrupt indicating that
* the DMA is finished.
*/
- if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
+ if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && slot->dma_active)
goto out;
- if (!host->dma_active && req->data &&
+ if (!slot->dma_active && req->data &&
(emm_int & MIO_EMM_INT_BUF_DONE)) {
unsigned int type = (rsp_sts >> 7) & 3;
if (type == 1)
- do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
+ do_read(slot, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
else if (type == 2)
do_write(req);
}
@@ -480,12 +744,16 @@ irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
emm_int & MIO_EMM_INT_CMD_ERR ||
emm_int & MIO_EMM_INT_DMA_ERR;
+ /* Add NCB_FLT interrupt for octtx2 */
+ if (is_mmc_otx2(host))
+ host_done = host_done || emm_int & MIO_EMM_INT_NCB_FLT;
+
if (!(host_done && req->done))
goto no_req_done;
req->cmd->error = check_status(rsp_sts);
- if (host->dma_active && req->data)
+ if (slot->dma_active && req->data)
if (!finish_dma(host, req->data))
goto no_req_done;
@@ -494,7 +762,18 @@ irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
(rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
cleanup_dma(host, rsp_sts);
- host->current_req = NULL;
+ /* follow CMD6 timing/width with IMMEDIATE switch */
+ if (slot && slot->cmd6_pending) {
+ if (host_done && !req->cmd->error) {
+ do_switch(host, slot->want_switch);
+ emmc_io_drive_setup(slot);
+ cvm_mmc_configure_delay(slot);
+ } else if (slot) {
+ slot->cmd6_pending = false;
+ }
+ }
+
+ slot->current_req = NULL;
req->done(req);
no_req_done:
@@ -609,9 +888,9 @@ static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
error:
WARN_ON_ONCE(1);
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
- /* Disable FIFO */
- writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
return 0;
}
@@ -653,7 +932,11 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
struct cvm_mmc_slot *slot = mmc_priv(mmc);
struct cvm_mmc_host *host = slot->host;
struct mmc_data *data;
- u64 emm_dma, addr;
+ u64 emm_dma, addr, int_enable_mask = 0;
+ int seg;
+
+ /* cleared by successful termination */
+ mrq->cmd->error = -EINVAL;
if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
!mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
@@ -662,17 +945,27 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
goto error;
}
+ /* unaligned multi-block DMA has problems, so forbid all unaligned */
+ for (seg = 0; seg < mrq->data->sg_len; seg++) {
+ struct scatterlist *sg = &mrq->data->sg[seg];
+ u64 align = (sg->offset | sg->length);
+
+ if (!(align & 7))
+ continue;
+ dev_info(&mmc->card->dev,
+ "Error:64bit alignment required\n");
+ goto error;
+ }
+
cvm_mmc_switch_to(slot);
data = mrq->data;
+
pr_debug("DMA request blocks: %d block_size: %d total_size: %d\n",
data->blocks, data->blksz, data->blocks * data->blksz);
if (data->timeout_ns)
set_wdog(slot, data->timeout_ns);
- WARN_ON(host->current_req);
- host->current_req = mrq;
-
emm_dma = prepare_ext_dma(mmc, mrq);
addr = prepare_dma(host, data);
if (!addr) {
@@ -680,9 +973,19 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
goto error;
}
- host->dma_active = true;
- host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
- MIO_EMM_INT_DMA_ERR);
+ mrq->host = mmc;
+ WARN_ON(slot->current_req);
+ slot->current_req = mrq;
+ slot->dma_active = true;
+
+ int_enable_mask = MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
+ MIO_EMM_INT_DMA_ERR;
+
+ /* Add NCB_FLT interrupt for octtx2 */
+ if (is_mmc_otx2(host))
+ int_enable_mask |= MIO_EMM_INT_NCB_FLT;
+
+ host->int_enable(host, int_enable_mask);
if (host->dmar_fixup)
host->dmar_fixup(host, mrq->cmd, data, addr);
@@ -700,22 +1003,22 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
return;
error:
- mrq->cmd->error = -EINVAL;
if (mrq->done)
mrq->done(mrq);
host->release_bus(host);
}
-static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
+static void do_read_request(struct cvm_mmc_slot *slot, struct mmc_request *mrq)
{
- sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
+ sg_miter_start(&slot->smi, mrq->data->sg, mrq->data->sg_len,
SG_MITER_ATOMIC | SG_MITER_TO_SG);
}
-static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
+static void do_write_request(struct cvm_mmc_slot *slot, struct mmc_request *mrq)
{
+ struct cvm_mmc_host *host = slot->host;
unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
- struct sg_mapping_iter *smi = &host->smi;
+ struct sg_mapping_iter *smi = &slot->smi;
unsigned int bytes_xfered;
int shift = 56;
u64 dat = 0;
@@ -749,6 +1052,51 @@ static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
sg_miter_stop(smi);
}
+static void cvm_mmc_track_switch(struct cvm_mmc_slot *slot, u32 cmd_arg)
+{
+ u8 how = (cmd_arg >> 24) & 3;
+ u8 where = (u8)(cmd_arg >> 16);
+ u8 val = (u8)(cmd_arg >> 8);
+
+ slot->want_switch = slot->cached_switch;
+
+ /*
+ * track ext_csd assignments (how==3) for critical entries
+ * to make sure we follow up with MIO_EMM_SWITCH adjustment
+ * before ANY mmc/core interaction at old settings.
+ * Current mmc/core logic (linux 4.14) does not set/clear
+ * bits (how = 1 or 2), which would require more complex
+ * logic to track the intent of a change
+ */
+
+ if (how != 3)
+ return;
+
+ switch (where) {
+ case EXT_CSD_BUS_WIDTH:
+ slot->want_switch &= ~MIO_EMM_SWITCH_BUS_WIDTH;
+ slot->want_switch |=
+ FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, val);
+ break;
+ case EXT_CSD_POWER_CLASS:
+ slot->want_switch &= ~MIO_EMM_SWITCH_POWER_CLASS;
+ slot->want_switch |=
+ FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, val);
+ break;
+ case EXT_CSD_HS_TIMING:
+ slot->want_switch &= ~MIO_EMM_SWITCH_TIMING;
+ if (val)
+ slot->want_switch |=
+ FIELD_PREP(MIO_EMM_SWITCH_TIMING,
+ (1 << (val - 1)));
+ break;
+ default:
+ return;
+ }
+
+ slot->cmd6_pending = true;
+}
+
static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct cvm_mmc_slot *slot = mmc_priv(mmc);
@@ -777,23 +1125,27 @@ static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
mods = cvm_mmc_get_cr_mods(cmd);
- WARN_ON(host->current_req);
- host->current_req = mrq;
+ WARN_ON(slot->current_req);
+ mrq->host = mmc;
+ slot->current_req = mrq;
if (cmd->data) {
if (cmd->data->flags & MMC_DATA_READ)
- do_read_request(host, mrq);
+ do_read_request(slot, mrq);
else
- do_write_request(host, mrq);
+ do_write_request(slot, mrq);
if (cmd->data->timeout_ns)
set_wdog(slot, cmd->data->timeout_ns);
} else
set_wdog(slot, 0);
- host->dma_active = false;
+ slot->dma_active = false;
host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);
+ if (cmd->opcode == MMC_SWITCH)
+ cvm_mmc_track_switch(slot, cmd->arg);
+
emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
@@ -819,37 +1171,257 @@ retry:
if (!retries)
dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
+ if (cmd->opcode == MMC_SWITCH)
+ udelay(1300);
+}
+
+static void cvm_mmc_wait_done(struct mmc_request *cvm_mrq)
+{
+ complete(&cvm_mrq->completion);
+}
+
+static int cvm_mmc_r1_cmd(struct mmc_host *mmc, u32 *statp, u32 opcode)
+{
+ static struct mmc_command cmd = {};
+ static struct mmc_request cvm_mrq = {};
+
+ if (!opcode)
+ opcode = MMC_SEND_STATUS;
+ cmd.opcode = opcode;
+ if (mmc->card)
+ cmd.arg = mmc->card->rca << 16;
+ else
+ cmd.arg = 1 << 16;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+ cmd.data = NULL;
+ cvm_mrq.cmd = &cmd;
+
+ init_completion(&cvm_mrq.completion);
+ cvm_mrq.done = cvm_mmc_wait_done;
+
+ cvm_mmc_request(mmc, &cvm_mrq);
+ if (!wait_for_completion_timeout(&cvm_mrq.completion,
+ msecs_to_jiffies(10))) {
+ mmc_abort_tuning(mmc, opcode);
+ return -ETIMEDOUT;
+ }
+
+ if (statp)
+ *statp = cmd.resp[0];
+
+ return cvm_mrq.cmd->error;
+}
+
+static int cvm_mmc_data_tuning(struct mmc_host *mmc, u32 *statp, u32 opcode)
+{
+ int err = 0;
+ u8 *ext_csd;
+ static struct mmc_command cmd = {};
+ static struct mmc_data data = {};
+ static struct mmc_request cvm_mrq = {};
+ static struct scatterlist sg;
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct mmc_card *card = mmc->card;
+
+ if (!(slot->cached_switch & MIO_EMM_SWITCH_HS400_TIMING)) {
+ int edetail = -EINVAL;
+ int core_opinion;
+
+ core_opinion =
+ mmc_send_tuning(mmc, opcode, &edetail);
+
+ /* only accept mmc/core opinion when it's happy */
+ if (!core_opinion)
+ return core_opinion;
+ }
+
+ /* EXT_CSD supported only after ver 3 */
+ if (card && card->csd.mmca_vsn <= CSD_SPEC_VER_3)
+ return -EOPNOTSUPP;
+ /*
+ * As the ext_csd is so large and mostly unused, we don't store the
+ * raw block in mmc_card.
+ */
+ ext_csd = kzalloc(BLKSZ_EXT_CSD, GFP_KERNEL);
+ if (!ext_csd)
+ return -ENOMEM;
+
+ cvm_mrq.cmd = &cmd;
+ cvm_mrq.data = &data;
+ cmd.data = &data;
+
+ cmd.opcode = MMC_SEND_EXT_CSD;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = BLKSZ_EXT_CSD;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, ext_csd, BLKSZ_EXT_CSD);
+
+ /* set timeout */
+ if (card) {
+ /* SD cards use a 100 multiplier rather than 10 */
+ u32 mult = mmc_card_sd(card) ? 100 : 10;
+
+ data.timeout_ns = card->csd.taac_ns * mult;
+ data.timeout_clks = card->csd.taac_clks * mult;
+ } else {
+ data.timeout_ns = 50 * NSEC_PER_MSEC;
+ }
+
+ init_completion(&cvm_mrq.completion);
+ cvm_mrq.done = cvm_mmc_wait_done;
+
+ cvm_mmc_request(mmc, &cvm_mrq);
+ if (!wait_for_completion_timeout(&cvm_mrq.completion,
+ msecs_to_jiffies(100))) {
+ mmc_abort_tuning(mmc, cmd.opcode);
+ err = -ETIMEDOUT;
+ }
+
+ data.sg_len = 0; /* FIXME: catch over-time completions? */
+ kfree(ext_csd);
+
+ if (err)
+ return err;
+
+ if (statp)
+ *statp = cvm_mrq.cmd->resp[0];
+
+ return cvm_mrq.cmd->error;
+}
+
+/* adjusters for the 4 otx2 delay line taps */
+struct adj {
+ const char *name;
+ u64 mask;
+ int (*test)(struct mmc_host *mmc, u32 *statp, u32 opcode);
+ u32 opcode;
+ bool ddr_only;
+};
+
+static int adjust_tuning(struct mmc_host *mmc, struct adj *adj, u32 opcode)
+{
+ int err, start_run = -1, best_run = 0, best_start = -1;
+ int last_good = -1;
+ bool prev_ok = false;
+ u64 timing, tap;
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct cvm_mmc_host *host = slot->host;
+ char how[MAX_NO_OF_TAPS+1] = "";
+
+ /* loop over range+1 to simplify processing */
+ for (tap = 0; tap <= MAX_NO_OF_TAPS; tap++, prev_ok = !err) {
+ if (tap < MAX_NO_OF_TAPS) {
+ timing = readq(host->base + MIO_EMM_TIMING(host));
+ timing &= ~adj->mask;
+ timing |= (tap << __bf_shf(adj->mask));
+ writeq(timing, host->base + MIO_EMM_TIMING(host));
+
+ err = adj->test(mmc, NULL, opcode);
+
+ how[tap] = "-+"[!err];
+ if (!err)
+ last_good = tap;
+ } else {
+ /*
+ * putting the end+1 case in loop simplifies
+ * logic, allowing 'prev_ok' to process a
+ * sweet spot in tuning which extends to wall.
+ */
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ /*
+ * If no CRC/etc errors in response, but previous
+ * failed, note the start of a new run
+ */
+ if (!prev_ok)
+ start_run = tap;
+ } else if (prev_ok) {
+ int run = tap - 1 - start_run;
+
+ /* did we just exit a wider sweet spot? */
+ if (start_run >= 0 && run > best_run) {
+ best_start = start_run;
+ best_run = run;
+ }
+ }
+ }
+
+ if (best_start < 0) {
+ dev_warn(host->dev, "%s %lldMHz tuning %s failed\n",
+ mmc_hostname(mmc), slot->clock / 1000000, adj->name);
+ return -EINVAL;
+ }
+
+ tap = best_start + best_run / 2;
+ how[tap] = '@';
+ if (tapdance) {
+ tap = last_good - tapdance;
+ how[tap] = 'X';
+ }
+ dev_dbg(host->dev, "%s/%s %d/%lld/%d %s\n",
+ mmc_hostname(mmc), adj->name,
+ best_start, tap, best_start + best_run,
+ how);
+ slot->taps &= ~adj->mask;
+ slot->taps |= (tap << __bf_shf(adj->mask));
+ cvm_mmc_set_timing(slot);
+ return 0;
+}
+
+static u32 max_supported_frequency(struct cvm_mmc_host *host)
+{
+ /* Default maximum frequency is 52000000 for chips prior to 9X */
+ u32 max_frequency = MHZ_52;
+
+ if (is_mmc_otx2(host)) {
+ /* Default max frequency is 200MHz for 9X chips */
+ max_frequency = MHZ_200;
+
+ /* Erratum is only applicable to pass A0 */
+ if (is_mmc_otx2_A0(host))
+ max_frequency = MHZ_100;
+ }
+ return max_frequency;
}
static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
+
struct cvm_mmc_slot *slot = mmc_priv(mmc);
struct cvm_mmc_host *host = slot->host;
int clk_period = 0, power_class = 10, bus_width = 0;
- u64 clock, emm_switch;
+ u64 clock, emm_switch, mode;
+ u32 max_f;
+
+ if (ios->power_mode == MMC_POWER_OFF) {
+ if (host->powered) {
+ cvm_mmc_reset_bus(slot);
+ if (host->global_pwr_gpiod)
+ host->set_shared_power(host, 0);
+ else if (!IS_ERR_OR_NULL(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ host->powered = false;
+ }
+ set_wdog(slot, 0);
+ return;
+ }
host->acquire_bus(host);
cvm_mmc_switch_to(slot);
- /* Set the power state */
- switch (ios->power_mode) {
- case MMC_POWER_ON:
- break;
-
- case MMC_POWER_OFF:
- cvm_mmc_reset_bus(slot);
- if (host->global_pwr_gpiod)
- host->set_shared_power(host, 0);
- else if (!IS_ERR(mmc->supply.vmmc))
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
- break;
-
- case MMC_POWER_UP:
+ if (ios->power_mode == MMC_POWER_UP) {
if (host->global_pwr_gpiod)
host->set_shared_power(host, 1);
- else if (!IS_ERR(mmc->supply.vmmc))
+ else if (!IS_ERR_OR_NULL(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
- break;
}
/* Convert bus width to HW definition */
@@ -866,41 +1438,201 @@ static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
/* DDR is available for 4/8 bit bus width */
- if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
- bus_width |= 4;
+ switch (ios->timing) {
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ if (ios->bus_width)
+ bus_width |= 4;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ if (ios->bus_width & 2)
+ bus_width |= 4;
+ break;
+ }
/* Change the clock frequency. */
clock = ios->clock;
- if (clock > 52000000)
- clock = 52000000;
+ max_f = max_supported_frequency(host);
+
+ if (clock < mmc->f_min)
+ clock = mmc->f_min;
+ if (clock > max_f)
+ clock = max_f;
+
slot->clock = clock;
- if (clock)
- clk_period = (host->sys_freq + clock - 1) / (2 * clock);
+ if (clock) {
+ clk_period = host->sys_freq / (2 * clock);
+ /* check to not exceed requested speed */
+ while (1) {
+ int hz = host->sys_freq / (2 * clk_period);
- emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
- (ios->timing == MMC_TIMING_MMC_HS)) |
+ if (hz <= clock)
+ break;
+ clk_period++;
+ }
+ }
+
+ emm_switch =
FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
+ switch (ios->timing) {
+ case MMC_TIMING_LEGACY:
+ break;
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR12:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING, 1);
+ break;
+ case MMC_TIMING_MMC_HS200:
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_HS200_TIMING, 1);
+ break;
+ case MMC_TIMING_MMC_HS400:
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_HS400_TIMING, 1);
+ break;
+ }
set_bus_id(&emm_switch, slot->bus_id);
+ pr_debug("mmc-slot%d trying switch %llx w%lld hs%lld hs200:%lld hs400:%lld\n",
+ slot->bus_id, emm_switch,
+ FIELD_GET(MIO_EMM_SWITCH_BUS_WIDTH, emm_switch),
+ FIELD_GET(MIO_EMM_SWITCH_HS_TIMING, emm_switch),
+ FIELD_GET(MIO_EMM_SWITCH_HS200_TIMING, emm_switch),
+ FIELD_GET(MIO_EMM_SWITCH_HS400_TIMING, emm_switch));
+
if (!switch_val_changed(slot, emm_switch))
goto out;
set_wdog(slot, 0);
do_switch(host, emm_switch);
+
+ mode = readq(host->base + MIO_EMM_MODE(host, slot->bus_id));
+ pr_debug("mmc-slot%d mode %llx w%lld hs%lld hs200:%lld hs400:%lld\n",
+ slot->bus_id, mode,
+ (mode >> 40) & 7, (mode >> 48) & 1,
+ (mode >> 49) & 1, (mode >> 50) & 1);
+
slot->cached_switch = emm_switch;
+ host->powered = true;
+ cvm_mmc_configure_delay(slot);
out:
host->release_bus(host);
}
+static struct adj adj[] = {
+ { "CMD_IN", MIO_EMM_TIMING_CMD_IN,
+ cvm_mmc_r1_cmd, MMC_SEND_STATUS, },
+ { "DATA_IN", MIO_EMM_TIMING_DATA_IN,
+ cvm_mmc_data_tuning, },
+ { NULL, },
+};
+
+static int cvm_scan_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct adj *a;
+ int ret;
+
+ for (a = adj; a->name; a++) {
+ if (a->ddr_only && !cvm_is_mmc_timing_ddr(slot))
+ continue;
+
+ ret = adjust_tuning(mmc, a,
+ a->opcode ?: opcode);
+
+ if (ret)
+ return ret;
+ }
+
+ cvm_mmc_set_timing(slot);
+ return 0;
+}
+
+static int cvm_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct cvm_mmc_host *host = slot->host;
+ int clk_period, hz;
+
+ int ret;
+
+ do {
+ u64 emm_switch =
+ readq(host->base + MIO_EMM_MODE(host, slot->bus_id));
+
+ clk_period = FIELD_GET(MIO_EMM_SWITCH_CLK_LO, emm_switch);
+ dev_info(slot->host->dev, "%s re-tuning\n",
+ mmc_hostname(mmc));
+ ret = cvm_scan_tuning(mmc, opcode);
+ if (ret) {
+ int inc = clk_period >> 3;
+
+ if (!inc)
+ inc++;
+ clk_period += inc;
+ hz = host->sys_freq / (2 * clk_period);
+ pr_debug("clk_period %d += %d, now %d Hz\n",
+ clk_period - inc, inc, hz);
+
+ if (hz < 400000)
+ break;
+
+ slot->clock = hz;
+ mmc->ios.clock = hz;
+
+ emm_switch &= ~MIO_EMM_SWITCH_CLK_LO;
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
+ clk_period);
+ emm_switch &= ~MIO_EMM_SWITCH_CLK_HI;
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
+ clk_period);
+ do_switch(host, emm_switch);
+ }
+ } while (ret);
+
+ return ret;
+}
+
+static int cvm_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+
+ return cvm_mmc_configure_delay(slot);
+}
+
+static void cvm_mmc_reset(struct mmc_host *mmc)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct cvm_mmc_host *host = slot->host;
+ u64 r;
+
+ cvm_mmc_reset_bus(slot);
+
+ r = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
+ FIELD_PREP(MIO_EMM_CMD_BUS_ID, slot->bus_id);
+
+ writeq(r, host->base + MIO_EMM_CMD(host));
+
+ do {
+ r = readq(host->base + MIO_EMM_RSP_STS(host));
+ } while (!(r & MIO_EMM_RSP_STS_CMD_DONE));
+}
+
static const struct mmc_host_ops cvm_mmc_ops = {
.request = cvm_mmc_request,
.set_ios = cvm_mmc_set_ios,
.get_ro = mmc_gpio_get_ro,
.get_cd = mmc_gpio_get_cd,
+ .hw_reset = cvm_mmc_reset,
+ .execute_tuning = cvm_execute_tuning,
+ .prepare_hs400_tuning = cvm_prepare_hs400_tuning,
};
static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
@@ -917,7 +1649,7 @@ static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
struct cvm_mmc_host *host = slot->host;
u64 emm_switch;
- /* Enable this bus slot. */
+ /* Enable this bus slot. Overridden when vqmmc-switching engaged */
host->emm_cfg |= (1ull << slot->bus_id);
writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
udelay(10);
@@ -933,8 +1665,8 @@ static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
/* Make the changes take effect on this bus slot. */
set_bus_id(&emm_switch, slot->bus_id);
do_switch(host, emm_switch);
-
slot->cached_switch = emm_switch;
+ host->powered = true;
/*
* Set watchdog timeout value and default reset value
@@ -953,7 +1685,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
struct device_node *node = dev->of_node;
struct mmc_host *mmc = slot->mmc;
- u64 clock_period;
+ u32 max_frequency, current_drive, clk_slew;
int ret;
ret = of_property_read_u32(node, "reg", &id);
@@ -962,8 +1694,14 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
return ret;
}
- if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
- dev_err(dev, "Invalid reg property on %pOF\n", node);
+ if (id >= CAVIUM_MAX_MMC) {
+ dev_err(dev, "Invalid reg=<%d> property on %pOF\n", id, node);
+ return -EINVAL;
+ }
+
+ if (slot->host->slot[id]) {
+ dev_err(dev, "Duplicate reg=<%d> property on %pOF\n",
+ id, node);
return -EINVAL;
}
@@ -974,7 +1712,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
* Legacy Octeon firmware has no regulator entry, fall-back to
* a hard-coded voltage to get a sane OCR.
*/
- if (IS_ERR(mmc->supply.vmmc))
+ if (IS_ERR_OR_NULL(mmc->supply.vmmc))
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/* Common MMC bindings */
@@ -982,7 +1720,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
if (ret)
return ret;
- /* Set bus width */
+ /* Set bus width from obsolete properties, if unset */
if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
if (bus_width == 8)
@@ -991,19 +1729,40 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
mmc->caps |= MMC_CAP_4_BIT_DATA;
}
+ max_frequency = max_supported_frequency(slot->host);
+
/* Set maximum and minimum frequency */
if (!mmc->f_max)
of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
- if (!mmc->f_max || mmc->f_max > 52000000)
- mmc->f_max = 52000000;
- mmc->f_min = 400000;
+ if (!mmc->f_max || mmc->f_max > max_frequency)
+ mmc->f_max = max_frequency;
+ mmc->f_min = KHZ_400;
/* Sampling register settings, period in picoseconds */
- clock_period = 1000000000000ull / slot->host->sys_freq;
of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
- slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
- slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;
+ if (is_mmc_8xxx(slot->host) || is_mmc_otx2(slot->host)) {
+ slot->cmd_cnt = cmd_skew;
+ slot->data_cnt = dat_skew;
+ } else {
+ u64 clock_period = 1000000000000ull / slot->host->sys_freq;
+
+ slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
+ slot->data_cnt = (dat_skew + clock_period / 2) / clock_period;
+ }
+
+ /* Get current drive and clk skew */
+ ret = of_property_read_u32(node, "cavium,drv-strength", &current_drive);
+ if (ret)
+ slot->drive = -1;
+ else
+ slot->drive = current_drive;
+
+ ret = of_property_read_u32(node, "cavium,clk-slew", &clk_slew);
+ if (ret)
+ slot->slew = -1;
+ else
+ slot->slew = clk_slew;
return id;
}
@@ -1012,6 +1771,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
struct cvm_mmc_slot *slot;
struct mmc_host *mmc;
+ struct iommu_domain *dom;
int ret, id;
mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
@@ -1030,16 +1790,19 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
/* Set up host parameters */
mmc->ops = &cvm_mmc_ops;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_BUS_WIDTH_TEST;
+ mmc->caps |= MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD;
+
/*
- * We only have a 3.3v supply, we cannot support any
- * of the UHS modes. We do support the high speed DDR
- * modes up to 52MHz.
+ * We only have a 3.3v supply for slots, we cannot
+ * support any of the UHS modes. We do support the
+ * high speed DDR modes up to 52MHz.
*
* Disable bounce buffers for max_segs = 1
*/
- mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
- MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
- MMC_CAP_3_3V_DDR;
+
+ if (!is_mmc_otx2(host))
+ mmc->caps |= MMC_CAP_3_3V_DDR;
if (host->use_sg)
mmc->max_segs = 16;
@@ -1055,14 +1818,30 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
/* DMA block count field is 15 bits */
mmc->max_blk_count = 32767;
+ dom = iommu_get_domain_for_dev(dev->parent);
+ if (dom && dom->type == IOMMU_DOMAIN_IDENTITY) {
+ unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
+
+ if (mmc->max_seg_size > max_size)
+ mmc->max_seg_size = max_size;
+
+ max_size *= mmc->max_segs;
+
+ if (mmc->max_req_size > max_size)
+ mmc->max_req_size = max_size;
+ }
+
+ mmc_can_retune(mmc);
+
slot->clock = mmc->f_min;
slot->bus_id = id;
slot->cached_rca = 1;
host->acquire_bus(host);
host->slot[id] = slot;
- cvm_mmc_switch_to(slot);
+ host->use_vqmmc |= !IS_ERR_OR_NULL(slot->mmc->supply.vqmmc);
cvm_mmc_init_lowlevel(slot);
+ cvm_mmc_switch_to(slot);
host->release_bus(host);
ret = mmc_add_host(mmc);
diff --git a/drivers/mmc/host/cavium.h b/drivers/mmc/host/cavium.h
index f3eea5eaa678..f38353171678 100644
--- a/drivers/mmc/host/cavium.h
+++ b/drivers/mmc/host/cavium.h
@@ -19,8 +19,42 @@
#include <linux/of.h>
#include <linux/scatterlist.h>
#include <linux/semaphore.h>
+#include <linux/pci.h>
#define CAVIUM_MAX_MMC 4
+#define BLKSZ_EXT_CSD 512
+#define MRVL_OCTEONTX2_96XX_PARTNUM 0xB2
+
+/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_8XXX 0xA
+#define PCI_SUBSYS_DEVID_9XXX 0xB
+#define PCI_SUBSYS_DEVID_95XX 0xB3
+
+#define KHZ_400 (400000)
+#define MHZ_26 (26000000)
+#define MHZ_52 (52000000)
+#define MHZ_100 (100000000)
+#define MHZ_200 (200000000)
+
+/* octtx2: emmc interface io current drive strength */
+#define MILLI_AMP_2 (0x0)
+#define MILLI_AMP_4 (0x1)
+#define MILLI_AMP_8 (0x2)
+#define MILLI_AMP_16 (0x3)
+
+/* octtx2: emmc interface io clk skew */
+#define LOW_SLEW_RATE (0x0)
+#define HIGH_SLEW_RATE (0x1)
+
+/* octtx2: emmc interface calibration */
+#define START_CALIBRATION (0x1)
+#define TOTAL_NO_OF_TAPS (512)
+#define PS_10000 (10 * 1000)
+#define PS_5000 (5000)
+#define PS_2500 (2500)
+#define PS_400 (400)
+#define MAX_NO_OF_TAPS 64
+
/* DMA register addresses */
#define MIO_EMM_DMA_FIFO_CFG(x) (0x00 + x->reg_off_dma)
@@ -33,8 +67,17 @@
#define MIO_EMM_DMA_INT_ENA_W1S(x) (0x40 + x->reg_off_dma)
#define MIO_EMM_DMA_INT_ENA_W1C(x) (0x48 + x->reg_off_dma)
+/* octtx2 specific registers */
+#define MIO_EMM_CALB(x) (0xC0 + x->reg_off)
+#define MIO_EMM_TAP(x) (0xC8 + x->reg_off)
+#define MIO_EMM_TIMING(x) (0xD0 + x->reg_off)
+#define MIO_EMM_DEBUG(x) (0xF8 + x->reg_off)
+
/* register addresses */
#define MIO_EMM_CFG(x) (0x00 + x->reg_off)
+#define MIO_EMM_MODE(x, s) (0x08 + 8*(s) + (x)->reg_off)
+/* octtx2 specific register */
+#define MIO_EMM_IO_CTL(x) (0x40 + x->reg_off)
#define MIO_EMM_SWITCH(x) (0x48 + x->reg_off)
#define MIO_EMM_DMA(x) (0x50 + x->reg_off)
#define MIO_EMM_CMD(x) (0x58 + x->reg_off)
@@ -56,6 +99,7 @@ struct cvm_mmc_host {
struct device *dev;
void __iomem *base;
void __iomem *dma_base;
+ struct pci_dev *pdev;
int reg_off;
int reg_off_dma;
u64 emm_cfg;
@@ -64,12 +108,10 @@ struct cvm_mmc_host {
struct clk *clk;
int sys_freq;
- struct mmc_request *current_req;
- struct sg_mapping_iter smi;
- bool dma_active;
bool use_sg;
-
bool has_ciu3;
+ bool powered;
+ bool use_vqmmc; /* must disable slots over switch */
bool big_dma_addr;
bool need_irq_handler_lock;
spinlock_t irq_handler_lock;
@@ -80,6 +122,9 @@ struct cvm_mmc_host {
struct cvm_mmc_slot *slot[CAVIUM_MAX_MMC];
struct platform_device *slot_pdev[CAVIUM_MAX_MMC];
+ /* octtx2 specific */
+ unsigned int per_tap_delay; /* per tap delay in pico second */
+ unsigned long delay_logged; /* per-ios.timing bitmask */
void (*set_shared_power)(struct cvm_mmc_host *, int);
void (*acquire_bus)(struct cvm_mmc_host *);
@@ -94,16 +139,27 @@ struct cvm_mmc_host {
struct cvm_mmc_slot {
struct mmc_host *mmc; /* slot-level mmc_core object */
struct cvm_mmc_host *host; /* common hw for all slots */
+ struct mmc_request *current_req;
u64 clock;
+ u32 ecount, gcount;
u64 cached_switch;
u64 cached_rca;
- unsigned int cmd_cnt; /* sample delay */
- unsigned int dat_cnt; /* sample delay */
+ struct sg_mapping_iter smi;
+ bool dma_active;
+
+ u64 taps; /* otx2: MIO_EMM_TIMING */
+ unsigned int cmd_cnt; /* otx: sample cmd in delay */
+ unsigned int data_cnt; /* otx: sample data in delay */
+
+ int drive; /* Current drive */
+ int slew; /* clock skew */
int bus_id;
+ bool cmd6_pending;
+ u64 want_switch;
};
struct cvm_mmc_cr_type {
@@ -161,6 +217,21 @@ struct cvm_mmc_cr_mods {
#define MIO_EMM_DMA_CFG_SIZE GENMASK_ULL(55, 36)
#define MIO_EMM_DMA_CFG_ADR GENMASK_ULL(35, 0)
+#define MIO_EMM_CFG_BUS_ENA GENMASK_ULL(3, 0)
+
+#define MIO_EMM_IO_CTL_DRIVE GENMASK_ULL(3, 2)
+#define MIO_EMM_IO_CTL_SLEW BIT_ULL(0)
+
+#define MIO_EMM_CALB_START BIT_ULL(0)
+#define MIO_EMM_TAP_DELAY GENMASK_ULL(7, 0)
+
+#define MIO_EMM_TIMING_CMD_IN GENMASK_ULL(53, 48)
+#define MIO_EMM_TIMING_CMD_OUT GENMASK_ULL(37, 32)
+#define MIO_EMM_TIMING_DATA_IN GENMASK_ULL(21, 16)
+#define MIO_EMM_TIMING_DATA_OUT GENMASK_ULL(5, 0)
+
+#define MIO_EMM_INT_NCB_RAS BIT_ULL(8)
+#define MIO_EMM_INT_NCB_FLT BIT_ULL(7)
#define MIO_EMM_INT_SWITCH_ERR BIT_ULL(6)
#define MIO_EMM_INT_SWITCH_DONE BIT_ULL(5)
#define MIO_EMM_INT_DMA_ERR BIT_ULL(4)
@@ -169,6 +240,9 @@ struct cvm_mmc_cr_mods {
#define MIO_EMM_INT_CMD_DONE BIT_ULL(1)
#define MIO_EMM_INT_BUF_DONE BIT_ULL(0)
+#define MIO_EMM_DMA_INT_FIFO BIT_ULL(1)
+#define MIO_EMM_DMA_INT_DMA BIT_ULL(0)
+
#define MIO_EMM_RSP_STS_BUS_ID GENMASK_ULL(61, 60)
#define MIO_EMM_RSP_STS_CMD_VAL BIT_ULL(59)
#define MIO_EMM_RSP_STS_SWITCH_VAL BIT_ULL(58)
@@ -200,9 +274,14 @@ struct cvm_mmc_cr_mods {
#define MIO_EMM_SWITCH_ERR0 BIT_ULL(58)
#define MIO_EMM_SWITCH_ERR1 BIT_ULL(57)
#define MIO_EMM_SWITCH_ERR2 BIT_ULL(56)
+#define MIO_EMM_SWITCH_ERRS GENMASK_ULL(58, 56)
+#define MIO_EMM_SWITCH_HS400_TIMING BIT_ULL(50)
+#define MIO_EMM_SWITCH_HS200_TIMING BIT_ULL(49)
#define MIO_EMM_SWITCH_HS_TIMING BIT_ULL(48)
+#define MIO_EMM_SWITCH_TIMING GENMASK_ULL(50, 48)
#define MIO_EMM_SWITCH_BUS_WIDTH GENMASK_ULL(42, 40)
#define MIO_EMM_SWITCH_POWER_CLASS GENMASK_ULL(35, 32)
+#define MIO_EMM_SWITCH_CLK GENMASK_ULL(31, 0)
#define MIO_EMM_SWITCH_CLK_HI GENMASK_ULL(31, 16)
#define MIO_EMM_SWITCH_CLK_LO GENMASK_ULL(15, 0)
@@ -210,6 +289,52 @@ struct cvm_mmc_cr_mods {
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id);
int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host);
int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot);
+
extern const char *cvm_mmc_irq_names[];
+static inline bool is_mmc_8xxx(struct cvm_mmc_host *host)
+{
+#ifdef CONFIG_ARM64
+ struct pci_dev *pdev = host->pdev;
+ u32 chip_id = (pdev->subsystem_device >> 12) & 0xF;
+
+ return (chip_id == PCI_SUBSYS_DEVID_8XXX);
+#else
+ return false;
+#endif
+}
+
+static inline bool is_mmc_otx2(struct cvm_mmc_host *host)
+{
+#ifdef CONFIG_ARM64
+ struct pci_dev *pdev = host->pdev;
+ u32 chip_id = (pdev->subsystem_device >> 12) & 0xF;
+
+ return (chip_id == PCI_SUBSYS_DEVID_9XXX);
+#else
+ return false;
+#endif
+}
+
+static inline bool is_mmc_otx2_A0(struct cvm_mmc_host *host)
+{
+#ifdef CONFIG_ARM64
+ struct pci_dev *pdev = host->pdev;
+ u32 chip_id = (pdev->subsystem_device >> 8) & 0xFF;
+
+ return (pdev->revision == 0x00) &&
+ (chip_id == MRVL_OCTEONTX2_96XX_PARTNUM);
+#else
+ return false;
+#endif
+}
+
+static inline bool is_mmc_95xx(struct cvm_mmc_host *host)
+{
+ struct pci_dev *pdev = host->pdev;
+ u32 chip_id = (pdev->subsystem_device >> 8) & 0xFF;
+
+ return (chip_id == PCI_SUBSYS_DEVID_95XX);
+}
+
#endif
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index 5047f7343ffc..c19f4c3f115a 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -343,12 +344,16 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100
+static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
+{
+ return cqhci_readl(cq_host, CQHCI_CTL);
+}
+
static void cqhci_off(struct mmc_host *mmc)
{
struct cqhci_host *cq_host = mmc->cqe_private;
- ktime_t timeout;
- bool timed_out;
u32 reg;
+ int err;
if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
return;
@@ -358,15 +363,9 @@ static void cqhci_off(struct mmc_host *mmc)
cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
- timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
- while (1) {
- timed_out = ktime_compare(ktime_get(), timeout) > 0;
- reg = cqhci_readl(cq_host, CQHCI_CTL);
- if ((reg & CQHCI_HALT) || timed_out)
- break;
- }
-
- if (timed_out)
+ err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
+ reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
+ if (err < 0)
pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
else
pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index ba9a63db73da..360d523132bd 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -246,6 +246,9 @@ static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host)
mrq = host->mrq;
+ if (host->cmd->error)
+ meson_mx_mmc_soft_reset(host);
+
host->mrq = NULL;
host->cmd = NULL;
@@ -357,14 +360,6 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
meson_mx_mmc_start_cmd(mmc, mrq->cmd);
}
-static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
-{
- struct meson_mx_mmc_host *host = mmc_priv(mmc);
- u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
-
- return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
-}
-
static void meson_mx_mmc_read_response(struct mmc_host *mmc,
struct mmc_command *cmd)
{
@@ -506,7 +501,6 @@ static void meson_mx_mmc_timeout(struct timer_list *t)
static struct mmc_host_ops meson_mx_mmc_ops = {
.request = meson_mx_mmc_request,
.set_ios = meson_mx_mmc_set_ios,
- .card_busy = meson_mx_mmc_card_busy,
.get_cd = mmc_gpio_get_cd,
.get_ro = mmc_gpio_get_ro,
};
@@ -570,7 +564,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
mmc->f_max = clk_round_rate(host->cfg_div_clk,
clk_get_rate(host->parent_clk));
- mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
mmc->ops = &meson_mx_mmc_ops;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 19544b121276..2a9af943709d 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1134,17 +1134,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host)
* SPI protocol. Another is that when chipselect is released while
* the card returns BUSY status, the clock must issue several cycles
* with chipselect high before the card will stop driving its output.
+ *
+ * SPI_CS_HIGH means "asserted" here. In some cases like when using
+ * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically
+ * inverted by gpiolib, so if we want to ascertain to drive it high
+ * we should toggle the default with an XOR as we do here.
*/
- host->spi->mode |= SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Just warn; most cards work without it. */
dev_warn(&host->spi->dev,
"can't change chip-select polarity\n");
- host->spi->mode &= ~SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
} else {
mmc_spi_readbytes(host, 18);
- host->spi->mode &= ~SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Wot, we can't get the same setup we had before? */
dev_err(&host->spi->dev,
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index b8554bf38f72..da59512e9411 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -168,6 +168,8 @@ static struct variant_data variant_ux500 = {
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -201,6 +203,8 @@ static struct variant_data variant_ux500v2 = {
.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -260,6 +264,7 @@ static struct variant_data variant_stm32_sdmmc = {
.datacnt_useless = true,
.datalength_bits = 25,
.datactrl_blocksz = 14,
+ .datactrl_any_blocksz = true,
.stm32_idmabsize_mask = GENMASK(12, 5),
.init = sdmmc_variant_init,
};
@@ -279,6 +284,7 @@ static struct variant_data variant_qcom = {
.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 208000000,
.explicit_mclk_control = true,
@@ -447,10 +453,11 @@ void mmci_dma_setup(struct mmci_host *host)
static int mmci_validate_data(struct mmci_host *host,
struct mmc_data *data)
{
+ struct variant_data *variant = host->variant;
+
if (!data)
return 0;
-
- if (!is_power_of_2(data->blksz)) {
+ if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
dev_err(mmc_dev(host->mmc),
"unsupported block size (%d bytes)\n", data->blksz);
return -EINVAL;
@@ -515,7 +522,9 @@ int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
data->sg_len, data->blksz, data->blocks, data->flags);
- host->ops->dma_start(host, &datactrl);
+ ret = host->ops->dma_start(host, &datactrl);
+ if (ret)
+ return ret;
/* Trigger the DMA transfer */
mmci_write_datactrlreg(host, datactrl);
@@ -822,6 +831,18 @@ static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
if (data->blksz * data->blocks <= variant->fifosize)
return -EINVAL;
+ /*
+ * This is necessary to get SDIO working on the Ux500. We do not yet
+ * know if this is a bug in:
+ * - The Ux500 DMA controller (DMA40)
+ * - The MMCI DMA interface on the Ux500
+ * some power of two blocks (such as 64 bytes) are sent regularly
+ * during SDIO traffic and those work fine so for these we enable DMA
+ * transfers.
+ */
+ if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
+ return -EINVAL;
+
device = chan->device;
nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
@@ -872,9 +893,14 @@ int mmci_dmae_prep_data(struct mmci_host *host,
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
+ int ret;
host->dma_in_progress = true;
- dmaengine_submit(dmae->desc_current);
+ ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
+ if (ret < 0) {
+ host->dma_in_progress = false;
+ return ret;
+ }
dma_async_issue_pending(dmae->cur);
*datactrl |= MCI_DPSM_DMAENABLE;
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 833236ecb31e..89ab73343cf3 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -278,7 +278,11 @@ struct mmci_host;
* @stm32_clkdiv: true if using a STM32-specific clock divider algorithm
* @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
* @datactrl_mask_sdio: SDIO enable mask in datactrl register
- * @datactrl_blksz: block size in power of two
+ * @datactrl_blocksz: block size in power of two
+ * @datactrl_any_blocksz: true if any block sizes are accepted by
+ * hardware, such as with some SDIO traffic that sends
+ * odd packets.
+ * @dma_power_of_2: DMA only works with blocks that are a power of 2.
* @datactrl_first: true if data must be setup before send command
* @datacnt_useless: true if you could not use datacnt register to read
* remaining data
@@ -323,6 +327,8 @@ struct variant_data {
unsigned int datactrl_mask_ddrmode;
unsigned int datactrl_mask_sdio;
unsigned int datactrl_blocksz;
+ u8 datactrl_any_blocksz:1;
+ u8 dma_power_of_2:1;
u8 datactrl_first:1;
u8 datacnt_useless:1;
u8 st_sdio:1;
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 8e83ae6920ae..0953bd8a4f79 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -162,6 +162,9 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+
+ if (!data->host_cookie)
+ sdmmc_idma_unprep_data(host, data, 0);
}
static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 978c8ccce7e3..cd4094275056 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -227,6 +227,7 @@
#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
+#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */
#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */
#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */
@@ -1870,6 +1871,7 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
/* select EMMC50 PAD CMD tune */
sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0));
+ sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2);
if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
mmc->ios.timing == MMC_TIMING_UHS_SDR104)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 952fa4063ff8..d0df054b0b47 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1512,6 +1512,36 @@ static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
if (mmc_pdata(host)->init_card)
mmc_pdata(host)->init_card(card);
+ else if (card->type == MMC_TYPE_SDIO ||
+ card->type == MMC_TYPE_SD_COMBO) {
+ struct device_node *np = mmc_dev(mmc)->of_node;
+
+ /*
+ * REVISIT: should be moved to sdio core and made more
+ * general e.g. by expanding the DT bindings of child nodes
+ * to provide a mechanism for supplying this information:
+ * Documentation/devicetree/bindings/mmc/mmc-card.txt
+ */
+
+ np = of_get_compatible_child(np, "ti,wl1251");
+ if (np) {
+ /*
+ * We have TI wl1251 attached to MMC3. Pass this
+ * information to the SDIO core because it can't be
+ * probed by normal methods.
+ */
+
+ dev_info(host->dev, "found wl1251\n");
+ card->quirks |= MMC_QUIRK_NONSTD_SDIO;
+ card->cccr.wide_bus = 1;
+ card->cis.vendor = 0x104c;
+ card->cis.device = 0x9066;
+ card->cis.blksize = 512;
+ card->cis.max_dtr = 24000000;
+ card->ocr = 0x80;
+ of_node_put(np);
+ }
+ }
}
static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 024acc1b0a2e..b2bbcb09a49e 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -740,16 +740,16 @@ static int pxamci_probe(struct platform_device *pdev)
goto out;
}
+ if (!host->pdata->gpio_card_ro_invert)
+ mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
if (ret && ret != -ENOENT) {
dev_err(dev, "Failed requesting gpio_ro\n");
goto out;
}
- if (!ret) {
+ if (!ret)
host->use_ro_gpio = true;
- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
- }
if (host->pdata->init)
host->pdata->init(dev, pxamci_detect_irq, mmc);
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 5f8d57ac084f..474df33de02b 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -644,8 +644,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
struct tmio_mmc_dma *dma_priv;
struct tmio_mmc_host *host;
struct renesas_sdhi *priv;
+ int num_irqs, irq, ret, i;
struct resource *res;
- int irq, ret, i;
u16 ver;
of_data = of_device_get_match_data(&pdev->dev);
@@ -823,24 +823,31 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->hs400_complete = renesas_sdhi_hs400_complete;
}
- i = 0;
- while (1) {
+ num_irqs = platform_irq_count(pdev);
+ if (num_irqs < 0) {
+ ret = num_irqs;
+ goto eirq;
+ }
+
+ /* There must be at least one IRQ source */
+ if (!num_irqs) {
+ ret = -ENXIO;
+ goto eirq;
+ }
+
+ for (i = 0; i < num_irqs; i++) {
irq = platform_get_irq(pdev, i);
- if (irq < 0)
- break;
- i++;
+ if (irq < 0) {
+ ret = irq;
+ goto eirq;
+ }
+
ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
goto eirq;
}
- /* There must be at least one IRQ source */
- if (!i) {
- ret = irq;
- goto eirq;
- }
-
dev_info(&pdev->dev, "%s base at 0x%08lx max clock rate %u MHz\n",
mmc_hostname(host->mmc), (unsigned long)
(platform_get_resource(pdev, IORESOURCE_MEM, 0)->start),
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 751fe91c7571..63eb203bba61 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -229,15 +229,12 @@ static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
DTRAN_CTRL_DM_START);
}
-static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
+static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
{
- struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
enum dma_data_direction dir;
- spin_lock_irq(&host->lock);
-
if (!host->data)
- goto out;
+ return false;
if (host->data->flags & MMC_DATA_READ)
dir = DMA_FROM_DEVICE;
@@ -250,6 +247,17 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
if (dir == DMA_FROM_DEVICE)
clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
+ return true;
+}
+
+static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
+{
+ struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+
+ spin_lock_irq(&host->lock);
+ if (!renesas_sdhi_internal_dmac_complete(host))
+ goto out;
+
tmio_mmc_do_data_irq(host);
out:
spin_unlock_irq(&host->lock);
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 1604f512c7bd..01fc437ed965 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -602,10 +602,12 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
}
static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
- .chip = &sdhci_acpi_chip_amd,
- .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
- .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
- SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .chip = &sdhci_acpi_chip_amd,
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.probe_slot = sdhci_acpi_emmc_amd_probe_slot,
};
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 44139fceac24..58d2fdd6155f 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -11,6 +11,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include "sdhci-pltfm.h"
@@ -193,52 +194,6 @@ static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv)
return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp);
}
-static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
- unsigned int timing)
-{
- struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
- u32 mode;
-
- switch (timing) {
- case MMC_TIMING_MMC_HS:
- mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
- break;
- case MMC_TIMING_MMC_DDR52:
- mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
- break;
- case MMC_TIMING_MMC_HS200:
- mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
- break;
- case MMC_TIMING_MMC_HS400:
- if (priv->enhanced_strobe)
- mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400ES;
- else
- mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
- break;
- default:
- mode = SDHCI_CDNS_HRS06_MODE_SD;
- break;
- }
-
- sdhci_cdns_set_emmc_mode(priv, mode);
-
- /* For SD, fall back to the default handler */
- if (mode == SDHCI_CDNS_HRS06_MODE_SD)
- sdhci_set_uhs_signaling(host, timing);
-}
-
-static const struct sdhci_ops sdhci_cdns_ops = {
- .set_clock = sdhci_set_clock,
- .get_timeout_clock = sdhci_cdns_get_timeout_clock,
- .set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
- .set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
-};
-
-static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
- .ops = &sdhci_cdns_ops,
-};
-
static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
{
struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
@@ -272,23 +227,24 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
return 0;
}
-static int sdhci_cdns_execute_tuning(struct mmc_host *mmc, u32 opcode)
+/*
+ * In SD mode, software must not use the hardware tuning and instead perform
+ * an almost identical procedure to eMMC.
+ */
+static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode)
{
- struct sdhci_host *host = mmc_priv(mmc);
int cur_streak = 0;
int max_streak = 0;
int end_of_streak = 0;
int i;
/*
- * This handler only implements the eMMC tuning that is specific to
- * this controller. Fall back to the standard method for SD timing.
+ * Do not execute tuning for UHS_SDR50 or UHS_DDR50.
+ * The delay is set by probe, based on the DT properties.
*/
- if (host->timing != MMC_TIMING_MMC_HS200)
- return sdhci_execute_tuning(mmc, opcode);
-
- if (WARN_ON(opcode != MMC_SEND_TUNING_BLOCK_HS200))
- return -EINVAL;
+ if (host->timing != MMC_TIMING_MMC_HS200 &&
+ host->timing != MMC_TIMING_UHS_SDR104)
+ return 0;
for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) {
if (sdhci_cdns_set_tune_val(host, i) ||
@@ -311,6 +267,58 @@ static int sdhci_cdns_execute_tuning(struct mmc_host *mmc, u32 opcode)
return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
}
+static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ u32 mode;
+
+ switch (timing) {
+ case MMC_TIMING_MMC_HS:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
+ break;
+ case MMC_TIMING_MMC_HS200:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ if (priv->enhanced_strobe)
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400ES;
+ else
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
+ break;
+ default:
+ mode = SDHCI_CDNS_HRS06_MODE_SD;
+ break;
+ }
+
+ sdhci_cdns_set_emmc_mode(priv, mode);
+
+ /* For SD, fall back to the default handler */
+ if (mode == SDHCI_CDNS_HRS06_MODE_SD)
+ sdhci_set_uhs_signaling(host, timing);
+}
+
+static const struct sdhci_ops sdhci_cdns_ops = {
+ .set_clock = sdhci_set_clock,
+ .get_timeout_clock = sdhci_cdns_get_timeout_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .platform_execute_tuning = sdhci_cdns_execute_tuning,
+ .set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_cdns_uniphier_pltfm_data = {
+ .ops = &sdhci_cdns_ops,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+};
+
+static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
+ .ops = &sdhci_cdns_ops,
+};
+
static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -334,6 +342,7 @@ static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
static int sdhci_cdns_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
+ const struct sdhci_pltfm_data *data;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_cdns_priv *priv;
struct clk *clk;
@@ -350,9 +359,13 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
if (ret)
return ret;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ data = &sdhci_cdns_pltfm_data;
+
nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node);
priv_size = sizeof(*priv) + sizeof(priv->phy_params[0]) * nr_phy_params;
- host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data, priv_size);
+ host = sdhci_pltfm_init(pdev, data, priv_size);
if (IS_ERR(host)) {
ret = PTR_ERR(host);
goto disable_clk;
@@ -366,7 +379,6 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
priv->hrs_addr = host->ioaddr;
priv->enhanced_strobe = false;
host->ioaddr += SDHCI_CDNS_SRS_BASE;
- host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
host->mmc_host_ops.hs400_enhanced_strobe =
sdhci_cdns_hs400_enhanced_strobe;
sdhci_enable_v4_mode(host);
@@ -430,7 +442,10 @@ static const struct dev_pm_ops sdhci_cdns_pm_ops = {
};
static const struct of_device_id sdhci_cdns_match[] = {
- { .compatible = "socionext,uniphier-sd4hc" },
+ {
+ .compatible = "socionext,uniphier-sd4hc",
+ .data = &sdhci_cdns_uniphier_pltfm_data,
+ },
{ .compatible = "cdns,sd4hc" },
{ /* sentinel */ }
};
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 776a94216248..3ba49f80db9b 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -87,7 +87,7 @@
#define ESDHC_STD_TUNING_EN (1 << 24)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
#define ESDHC_TUNING_START_TAP_DEFAULT 0x1
-#define ESDHC_TUNING_START_TAP_MASK 0xff
+#define ESDHC_TUNING_START_TAP_MASK 0x7f
#define ESDHC_TUNING_STEP_MASK 0x00070000
#define ESDHC_TUNING_STEP_SHIFT 16
@@ -1381,13 +1381,14 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
host->mmc->parent->platform_data);
/* write_protect */
if (boarddata->wp_type == ESDHC_WP_GPIO) {
+ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request write-protect gpio!\n");
return err;
}
- host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
}
/* card_detect */
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 9cf14b359c14..256d9173acd5 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -99,7 +99,7 @@
#define CORE_PWRSAVE_DLL BIT(3)
-#define DDR_CONFIG_POR_VAL 0x80040853
+#define DDR_CONFIG_POR_VAL 0x80040873
#define INVALID_TUNING_PHASE -1
@@ -148,8 +148,9 @@ struct sdhci_msm_offset {
u32 core_ddr_200_cfg;
u32 core_vendor_spec3;
u32 core_dll_config_2;
+ u32 core_dll_config_3;
+ u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
u32 core_ddr_config;
- u32 core_ddr_config_2;
};
static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
@@ -177,8 +178,8 @@ static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
.core_ddr_200_cfg = 0x224,
.core_vendor_spec3 = 0x250,
.core_dll_config_2 = 0x254,
- .core_ddr_config = 0x258,
- .core_ddr_config_2 = 0x25c,
+ .core_dll_config_3 = 0x258,
+ .core_ddr_config = 0x25c,
};
static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
@@ -207,8 +208,8 @@ static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
.core_ddr_200_cfg = 0x184,
.core_vendor_spec3 = 0x1b0,
.core_dll_config_2 = 0x1b4,
- .core_ddr_config = 0x1b8,
- .core_ddr_config_2 = 0x1bc,
+ .core_ddr_config_old = 0x1b8,
+ .core_ddr_config = 0x1bc,
};
struct sdhci_msm_variant_ops {
@@ -253,6 +254,7 @@ struct sdhci_msm_host {
const struct sdhci_msm_offset *offset;
bool use_cdr;
u32 transfer_mode;
+ bool updated_ddr_cfg;
};
static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
@@ -924,8 +926,10 @@ out:
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
struct mmc_host *mmc = host->mmc;
- u32 dll_status, config;
+ u32 dll_status, config, ddr_cfg_offset;
int ret;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
const struct sdhci_msm_offset *msm_offset =
sdhci_priv_msm_offset(host);
@@ -938,8 +942,11 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
* bootloaders. In the future, if this changes, then the desired
* values will need to be programmed appropriately.
*/
- writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
- msm_offset->core_ddr_config);
+ if (msm_host->updated_ddr_cfg)
+ ddr_cfg_offset = msm_offset->core_ddr_config;
+ else
+ ddr_cfg_offset = msm_offset->core_ddr_config_old;
+ writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset);
if (mmc->ios.enhanced_strobe) {
config = readl_relaxed(host->ioaddr +
@@ -1106,6 +1113,12 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
msm_host->use_cdr = true;
/*
+ * Clear tuning_done flag before tuning to ensure proper
+ * HS400 settings.
+ */
+ msm_host->tuning_done = 0;
+
+ /*
* For HS400 tuning in HS200 timing requires:
* - select MCLK/2 in VENDOR_SPEC
* - program MCLK to 400MHz (or nearest supported) in GCC
@@ -1729,7 +1742,9 @@ static const struct sdhci_ops sdhci_msm_ops = {
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
- SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &sdhci_msm_ops,
};
@@ -1899,6 +1914,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_offset->core_vendor_spec_capabilities0);
}
+ if (core_major == 1 && core_minor >= 0x49)
+ msm_host->updated_ddr_cfg = true;
+
/*
* Power on reset state may trigger power irq if previous status of
* PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
@@ -1936,6 +1954,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}
+ msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index e7d1920729fb..881f8138e7de 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -118,7 +118,8 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
{
sdhci_reset(host, mask);
- if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ || mmc_gpio_get_cd(host->mmc) >= 0)
sdhci_at91_set_force_card_detect(host);
}
@@ -324,19 +325,22 @@ static int sdhci_at91_probe(struct platform_device *pdev)
priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
if (IS_ERR(priv->mainck)) {
dev_err(&pdev->dev, "failed to get baseclk\n");
- return PTR_ERR(priv->mainck);
+ ret = PTR_ERR(priv->mainck);
+ goto sdhci_pltfm_free;
}
priv->hclock = devm_clk_get(&pdev->dev, "hclock");
if (IS_ERR(priv->hclock)) {
dev_err(&pdev->dev, "failed to get hclock\n");
- return PTR_ERR(priv->hclock);
+ ret = PTR_ERR(priv->hclock);
+ goto sdhci_pltfm_free;
}
priv->gck = devm_clk_get(&pdev->dev, "multclk");
if (IS_ERR(priv->gck)) {
dev_err(&pdev->dev, "failed to get multclk\n");
- return PTR_ERR(priv->gck);
+ ret = PTR_ERR(priv->gck);
+ goto sdhci_pltfm_free;
}
ret = sdhci_at91_set_clks_presets(&pdev->dev);
@@ -358,7 +362,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
/* HS200 is broken at this moment */
- host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
ret = sdhci_add_host(host);
if (ret)
@@ -394,8 +398,11 @@ static int sdhci_at91_probe(struct platform_device *pdev)
* detection procedure using the SDMCC_CD signal is bypassed.
* This bit is reset when a software reset for all command is performed
* so we need to implement our own reset function to set back this bit.
+ *
+ * WA: SAMA5D2 doesn't drive CMD if using CD GPIO line.
*/
- if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ || mmc_gpio_get_cd(host->mmc) >= 0)
sdhci_at91_set_force_card_detect(host);
pm_runtime_put_autosuspend(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d25c70e861f5..e7b5a98d2622 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -78,6 +78,7 @@ struct sdhci_esdhc {
bool quirk_limited_clk_division;
bool quirk_unreliable_pulse_detection;
bool quirk_fixup_tuning;
+ bool quirk_delay_before_data_reset;
bool quirk_ignore_data_inhibit;
unsigned int peripheral_clock;
const struct esdhc_clk_fixup *clk_fixup;
@@ -703,21 +704,58 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- u32 val;
+ u32 val, bus_width = 0;
+
+ /*
+ * Add delay to make sure all the DMA transfers are finished
+ * for quirk.
+ */
+ if (esdhc->quirk_delay_before_data_reset &&
+ (mask & SDHCI_RESET_DATA) &&
+ (host->flags & SDHCI_REQ_USE_DMA))
+ mdelay(5);
+
+ /*
+ * Save bus-width for eSDHC whose vendor version is 2.2
+ * or lower for data reset.
+ */
+ if ((mask & SDHCI_RESET_DATA) &&
+ (esdhc->vendor_ver <= VENDOR_V_22)) {
+ val = sdhci_readl(host, ESDHC_PROCTL);
+ bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
+ }
sdhci_reset(host, mask);
- sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
- sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ /*
+ * Restore bus-width setting and interrupt registers for eSDHC
+ * whose vendor version is 2.2 or lower for data reset.
+ */
+ if ((mask & SDHCI_RESET_DATA) &&
+ (esdhc->vendor_ver <= VENDOR_V_22)) {
+ val = sdhci_readl(host, ESDHC_PROCTL);
+ val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
+ val |= bus_width;
+ sdhci_writel(host, val, ESDHC_PROCTL);
- if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
- mdelay(5);
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
- if (mask & SDHCI_RESET_ALL) {
+ /*
+ * Some bits have to be cleaned manually for eSDHC whose spec
+ * version is higher than 3.0 for all reset.
+ */
+ if ((mask & SDHCI_RESET_ALL) &&
+ (esdhc->spec_ver >= SDHCI_SPEC_300)) {
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_TB_EN;
sdhci_writel(host, val, ESDHC_TBCTL);
+ /*
+ * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
+ * 0 for quirk.
+ */
if (esdhc->quirk_unreliable_pulse_detection) {
val = sdhci_readl(host, ESDHC_DLLCFG1);
val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
@@ -1040,6 +1078,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
if (match)
esdhc->clk_fixup = match->data;
np = pdev->dev.of_node;
+
+ if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
+ esdhc->quirk_delay_before_data_reset = true;
+
clk = of_clk_get(np, 0);
if (!IS_ERR(clk)) {
/*
@@ -1116,8 +1158,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
- host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
- host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
}
if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 083e7e053c95..d3135249b2e4 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1134,6 +1134,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
+ /* R1B responses are required to properly manage HW busy detection. */
+ mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
ret = sdhci_setup_host(host);
if (ret)
goto err_put_sync;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 267b90374fa4..3ac3ccf9af5c 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -21,11 +21,13 @@
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/gpio.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
+#include <linux/dmi.h>
#ifdef CONFIG_X86
#include <asm/iosf_mbi.h>
@@ -599,6 +601,9 @@ static int intel_select_drive_strength(struct mmc_card *card,
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct intel_host *intel_host = sdhci_pci_priv(slot);
+ if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
+ return 0;
+
return intel_host->drv_strength;
}
@@ -782,11 +787,18 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
+static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
+{
+ return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
+ dmi_match(DMI_BIOS_VENDOR, "LENOVO");
+}
+
static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
int ret = byt_emmc_probe_slot(slot);
- slot->host->mmc->caps2 |= MMC_CAP2_CQE;
+ if (!glk_broken_cqhci(slot))
+ slot->host->mmc->caps2 |= MMC_CAP2_CQE;
if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
@@ -1590,11 +1602,59 @@ static int amd_probe(struct sdhci_pci_chip *chip)
return 0;
}
+static u32 sdhci_read_present_state(struct sdhci_host *host)
+{
+ return sdhci_readl(host, SDHCI_PRESENT_STATE);
+}
+
+static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 present_state;
+
+ /*
+ * SDHC 0x7906 requires a hard reset to clear all internal state.
+ * Otherwise it can get into a bad state where the DATA lines are always
+ * read as zeros.
+ */
+ if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
+ pci_clear_master(pdev);
+
+ pci_save_state(pdev);
+
+ pci_set_power_state(pdev, PCI_D3cold);
+ pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
+ pdev->current_state);
+ pci_set_power_state(pdev, PCI_D0);
+
+ pci_restore_state(pdev);
+
+ /*
+ * SDHCI_RESET_ALL says the card detect logic should not be
+ * reset, but since we need to reset the entire controller
+ * we should wait until the card detect logic has stabilized.
+ *
+ * This normally takes about 40ms.
+ */
+ readx_poll_timeout(
+ sdhci_read_present_state,
+ host,
+ present_state,
+ present_state & SDHCI_CD_STABLE,
+ 10000,
+ 100000
+ );
+ }
+
+ return sdhci_reset(host, mask);
+}
+
static const struct sdhci_ops amd_sdhci_pci_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = amd_sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
@@ -1670,6 +1730,9 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
+ SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
+ SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
+ SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(O2, 8120, o2),
SDHCI_PCI_DEVICE(O2, 8220, o2),
SDHCI_PCI_DEVICE(O2, 8221, o2),
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 19944b0049db..7b3c9e6dab65 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -441,6 +441,12 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd;
}
+ if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD1) {
+ slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd;
+ host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
+ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+ }
+
host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning;
if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2)
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index e5dc6e44c7a4..dbf7bdaa3fa3 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -52,6 +52,9 @@
#define PCI_DEVICE_ID_INTEL_ICP_SD 0x34f8
#define PCI_DEVICE_ID_INTEL_CML_EMMC 0x02c4
#define PCI_DEVICE_ID_INTEL_CML_SD 0x02f5
+#define PCI_DEVICE_ID_INTEL_CMLH_SD 0x06f5
+#define PCI_DEVICE_ID_INTEL_JSL_EMMC 0x4dc4
+#define PCI_DEVICE_ID_INTEL_JSL_SD 0x4df8
#define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
#define PCI_DEVICE_ID_VIA_95D0 0x95d0
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index a8df5ac97977..6faa36eca170 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
}
@@ -1552,6 +1552,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
host->mmc->caps |= MMC_CAP_1_8V_DDR;
+ /* R1B responses are required to properly manage HW busy detection. */
+ host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
tegra_sdhci_parse_dt(host);
tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 1dea1ba66f7b..4703cd540c7f 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -235,6 +235,16 @@ static void xenon_voltage_switch(struct sdhci_host *host)
{
/* Wait for 5ms after set 1.8V signal enable bit */
usleep_range(5000, 5500);
+
+ /*
+ * For some reason the controller's Host Control2 register reports
+ * the bit representing 1.8V signaling as 0 when read after it was
+ * written as 1. Subsequent read reports 1.
+ *
+ * Since this may cause some issues, do an empty read of the Host
+ * Control2 register here to circumvent this.
+ */
+ sdhci_readw(host, SDHCI_HOST_CONTROL2);
}
static const struct sdhci_ops sdhci_xenon_ops = {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e41ccb836538..5a8d97a8f1e1 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -152,7 +152,7 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
u32 present;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
- !mmc_card_is_removable(host->mmc))
+ !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
return;
if (enable) {
@@ -977,7 +977,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
-static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
if (enable)
host->ier |= SDHCI_INT_DATA_TIMEOUT;
@@ -986,28 +986,31 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
+EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
-static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
- u8 count;
-
- if (host->ops->set_timeout) {
- host->ops->set_timeout(host, cmd);
- } else {
- bool too_big = false;
-
- count = sdhci_calc_timeout(host, cmd, &too_big);
+ bool too_big = false;
+ u8 count = sdhci_calc_timeout(host, cmd, &too_big);
+
+ if (too_big &&
+ host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
+ sdhci_calc_sw_timeout(host, cmd);
+ sdhci_set_data_timeout_irq(host, false);
+ } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
+ sdhci_set_data_timeout_irq(host, true);
+ }
- if (too_big &&
- host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
- sdhci_calc_sw_timeout(host, cmd);
- sdhci_set_data_timeout_irq(host, false);
- } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
- sdhci_set_data_timeout_irq(host, true);
- }
+ sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+}
+EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
- sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
- }
+static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ if (host->ops->set_timeout)
+ host->ops->set_timeout(host, cmd);
+ else
+ __sdhci_set_timeout(host, cmd);
}
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
@@ -1849,9 +1852,7 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
else if (timing == MMC_TIMING_UHS_SDR12)
ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
- else if (timing == MMC_TIMING_SD_HS ||
- timing == MMC_TIMING_MMC_HS ||
- timing == MMC_TIMING_UHS_SDR25)
+ else if (timing == MMC_TIMING_UHS_SDR25)
ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
else if (timing == MMC_TIMING_UHS_SDR50)
ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
@@ -2391,8 +2392,8 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
sdhci_send_tuning(host, opcode);
if (!host->tuning_done) {
- pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
- mmc_hostname(host->mmc));
+ pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
+ mmc_hostname(host->mmc));
sdhci_abort_tuning(host, opcode);
return -ETIMEDOUT;
}
@@ -3740,6 +3741,9 @@ int sdhci_setup_host(struct sdhci_host *host)
mmc_hostname(mmc), host->version);
}
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
+ mmc->caps2 &= ~MMC_CAP2_CQE;
+
if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
host->flags |= SDHCI_USE_SDMA;
else if (!(host->caps & SDHCI_CAN_DO_SDMA))
@@ -3883,11 +3887,13 @@ int sdhci_setup_host(struct sdhci_host *host)
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else if (host->version >= SDHCI_SPEC_300) {
- if (host->clk_mul) {
- mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
+ if (host->clk_mul)
max_clk = host->max_clk * host->clk_mul;
- } else
- mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
+ /*
+ * Divided Clock Mode minimum clock rate is always less than
+ * Programmable Clock Mode minimum clock rate.
+ */
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index b258748e97de..f69bebe51520 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -408,6 +408,8 @@ struct sdhci_host {
#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
/* Controller reports inverted write-protect state */
#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
+/* Controller has unusable command queue engine */
+#define SDHCI_QUIRK_BROKEN_CQE (1<<17)
/* Controller does not like fast PIO transfers */
#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
/* Controller does not have a LED */
@@ -797,5 +799,7 @@ void sdhci_start_tuning(struct sdhci_host *host);
void sdhci_end_tuning(struct sdhci_host *host);
void sdhci_reset_tuning(struct sdhci_host *host);
void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
#endif /* __SDHCI_HW_H */
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 3222ea4d584d..9839821fa27b 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -179,6 +179,22 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
writeb(val, host->ioaddr + reg);
}
+static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int err = sdhci_execute_tuning(mmc, opcode);
+
+ if (err)
+ return err;
+ /*
+ * Tuning data remains in the buffer after tuning.
+ * Do a command and data reset to get rid of it
+ */
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_am654_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -344,6 +360,8 @@ static int sdhci_am654_probe(struct platform_device *pdev)
goto pm_runtime_put;
}
+ host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
+
ret = sdhci_am654_init(host);
if (ret)
goto pm_runtime_put;
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 81bd9afb0980..98c575de43c7 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1393,11 +1393,9 @@ static int sh_mmcif_probe(struct platform_device *pdev)
const char *name;
irq[0] = platform_get_irq(pdev, 0);
- irq[1] = platform_get_irq(pdev, 1);
- if (irq[0] < 0) {
- dev_err(dev, "Get irq error\n");
+ irq[1] = platform_get_irq_optional(pdev, 1);
+ if (irq[0] < 0)
return -ENXIO;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(dev, res);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 29ec78486e69..5c7869f4fc3d 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -1184,7 +1184,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
if (ret == -EPROBE_DEFER)
return ret;
- mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = pdata->max_segs ? : 32;
mmc->max_blk_size = TMIO_MAX_BLK_SIZE;
@@ -1295,12 +1295,14 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
cancel_work_sync(&host->done);
cancel_delayed_work_sync(&host->delayed_reset_work);
tmio_mmc_release_dma(host);
+ tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
- pm_runtime_dont_use_autosuspend(&pdev->dev);
if (host->native_hotplug)
pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_put_sync(&pdev->dev);
+
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
index 91a2be41edf6..a16d16f97190 100644
--- a/drivers/mmc/host/uniphier-sd.c
+++ b/drivers/mmc/host/uniphier-sd.c
@@ -616,11 +616,6 @@ static int uniphier_sd_probe(struct platform_device *pdev)
}
}
- ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
- dev_name(dev), host);
- if (ret)
- goto free_host;
-
if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
host->dma_ops = &uniphier_sd_internal_dma_ops;
else
@@ -648,8 +643,15 @@ static int uniphier_sd_probe(struct platform_device *pdev)
if (ret)
goto free_host;
+ ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
+ dev_name(dev), host);
+ if (ret)
+ goto remove_host;
+
return 0;
+remove_host:
+ tmio_mmc_host_remove(host);
free_host:
tmio_mmc_host_free(host);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index f4ac064ff471..8d96ecba1b55 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -319,6 +319,8 @@ struct via_crdr_mmc_host {
/* some devices need a very long delay for power to stabilize */
#define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001
+#define VIA_CMD_TIMEOUT_MS 1000
+
static const struct pci_device_id via_ids[] = {
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
@@ -551,14 +553,17 @@ static void via_sdc_send_command(struct via_crdr_mmc_host *host,
{
void __iomem *addrbase;
struct mmc_data *data;
+ unsigned int timeout_ms;
u32 cmdctrl = 0;
WARN_ON(host->cmd);
data = cmd->data;
- mod_timer(&host->timer, jiffies + HZ);
host->cmd = cmd;
+ timeout_ms = cmd->busy_timeout ? cmd->busy_timeout : VIA_CMD_TIMEOUT_MS;
+ mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms));
+
/*Command index*/
cmdctrl = cmd->opcode << 8;