Diffstat (limited to 'drivers/mmc/host/cavium.c')
-rw-r--r-- drivers/mmc/host/cavium.c | 1023
1 file changed, 901 insertions(+), 122 deletions(-)
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index 89deb451e0ac..5005efd113ee 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -25,6 +25,8 @@
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>
+#include <linux/iommu.h>
+#include <linux/swiotlb.h>
#include "cavium.h"
@@ -38,6 +40,8 @@ const char *cvm_mmc_irq_names[] = {
"MMC Switch Error",
"MMC DMA int Fifo",
"MMC DMA int",
+ "MMC NCB Fault",
+ "MMC RAS",
};
/*
@@ -71,7 +75,7 @@ static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
{0, 1}, /* CMD16 */
{1, 1}, /* CMD17 */
{1, 1}, /* CMD18 */
- {3, 1}, /* CMD19 */
+ {2, 1}, /* CMD19 */
{2, 1}, /* CMD20 */
{0, 0}, /* CMD21 */
{0, 0}, /* CMD22 */
@@ -118,6 +122,156 @@ static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
{0, 0} /* CMD63 */
};
+static int tapdance;
+module_param(tapdance, int, 0644);
+MODULE_PARM_DESC(tapdance, "adjust bus-timing: (0=mid-eye, positive=Nth_fastest_tap)");
+
+static int clk_scale = 100;
+module_param(clk_scale, int, 0644);
+MODULE_PARM_DESC(clk_scale, "percent scale data_/cmd_out taps (default 100)");
+
+static bool fixed_timing;
+module_param(fixed_timing, bool, 0444);
+MODULE_PARM_DESC(fixed_timing, "use fixed data_/cmd_out taps");
+
+static bool ddr_cmd_taps;
+module_param(ddr_cmd_taps, bool, 0644);
+MODULE_PARM_DESC(ddr_cmd_taps, "reduce cmd_out taps in DDR modes, preserving legacy behavior");
+
+static bool cvm_is_mmc_timing_ddr(struct cvm_mmc_slot *slot)
+{
+ return (slot->mmc->ios.timing == MMC_TIMING_UHS_DDR50) ||
+ (slot->mmc->ios.timing == MMC_TIMING_MMC_DDR52) ||
+ (slot->mmc->ios.timing == MMC_TIMING_MMC_HS400);
+}
+
+static void cvm_mmc_set_timing(struct cvm_mmc_slot *slot)
+{
+ if (!is_mmc_otx2(slot->host))
+ return;
+
+ writeq(slot->taps, slot->host->base + MIO_EMM_TIMING(slot->host));
+}
+
+static int tout(struct cvm_mmc_slot *slot, int ps, int hint)
+{
+ struct cvm_mmc_host *host = slot->host;
+ struct mmc_host *mmc = slot->mmc;
+ int tap_ps = host->per_tap_delay;
+ int timing = mmc->ios.timing;
+ static int old_scale;
+ int taps;
+
+ if (fixed_timing)
+ return hint;
+
+ if (!hint)
+ hint = 63;
+
+ if (!tap_ps)
+ return hint;
+
+ taps = min((int)(ps * clk_scale) / (tap_ps * 100), 63);
+
+ /* when modparam is adjusted, re-announce timing */
+ if (old_scale != clk_scale) {
+ host->delay_logged = 0;
+ old_scale = clk_scale;
+ }
+
+ if (!test_and_set_bit(timing, &host->delay_logged))
+ dev_info(host->dev, "mmc%d.ios_timing:%d %dps hint:%d taps:%d\n",
+ mmc->index, timing, ps, hint, taps);
+
+ return taps;
+}
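+
+/*
+ * Worked example (numbers illustrative, not from the hardware manual):
+ * with per_tap_delay = 156 ps, clk_scale = 100 and a requested window
+ * of ps = 2500,
+ *
+ * taps = min((2500 * 100) / (156 * 100), 63) = min(16, 63) = 16
+ *
+ * i.e. the requested delay divided by the per-tap delay, scaled by
+ * clk_scale percent and clamped to the 6-bit tap field.
+ */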
+
+static int cvm_mmc_configure_delay(struct cvm_mmc_slot *slot)
+{
+ struct cvm_mmc_host *host = slot->host;
+ struct mmc_host *mmc = slot->mmc;
+
+ pr_debug("slot%d.configure_delay\n", slot->bus_id);
+
+ if (is_mmc_8xxx(host)) {
+ /* MIO_EMM_SAMPLE exists only up to T83XX */
+ u64 emm_sample =
+ FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
+ FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->data_cnt);
+ writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));
+ } else {
+ int half = MAX_NO_OF_TAPS / 2;
+ int cin = FIELD_GET(MIO_EMM_TIMING_CMD_IN, slot->taps);
+ int din = FIELD_GET(MIO_EMM_TIMING_DATA_IN, slot->taps);
+ int cout, dout;
+
+ if (!slot->taps)
+ cin = din = half;
+ /*
+ * EMM_CMD hold time from rising edge of EMMC_CLK.
+ * Typically 3.0 ns at frequencies < 26 MHz.
+ * Typically 3.0 ns at frequencies <= 52 MHz SDR.
+ * Typically 2.5 ns at frequencies <= 52 MHz DDR.
+ * Typically 0.8 ns at frequencies > 52 MHz SDR.
+ * Typically 0.4 ns at frequencies > 52 MHz DDR.
+ */
+ switch (mmc->ios.timing) {
+ case MMC_TIMING_LEGACY:
+ default:
+ if (mmc->card && mmc_card_mmc(mmc->card))
+ cout = tout(slot, 5000, 39);
+ else
+ cout = tout(slot, 8000, 63);
+ break;
+ case MMC_TIMING_UHS_SDR12:
+ cout = tout(slot, 3000, 39);
+ break;
+ case MMC_TIMING_MMC_HS:
+ cout = tout(slot, 2500, 32);
+ break;
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_SDR50:
+ cout = tout(slot, 2000, 26);
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ cout = tout(slot, 1500, 20);
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_MMC_HS400:
+ cout = tout(slot, 800, 10);
+ break;
+ }
+
+ if (!is_mmc_95xx(host)) {
+ if (!cvm_is_mmc_timing_ddr(slot))
+ dout = cout;
+ else if (ddr_cmd_taps)
+ cout = dout = cout / 2;
+ else
+ dout = cout / 2;
+ } else {
+ dout = tout(slot, 800, 10);
+ }
+
+ slot->taps =
+ FIELD_PREP(MIO_EMM_TIMING_CMD_IN, cin) |
+ FIELD_PREP(MIO_EMM_TIMING_CMD_OUT, cout) |
+ FIELD_PREP(MIO_EMM_TIMING_DATA_IN, din) |
+ FIELD_PREP(MIO_EMM_TIMING_DATA_OUT, dout);
+
+ pr_debug("slot%d.taps %llx\n", slot->bus_id, slot->taps);
+ cvm_mmc_set_timing(slot);
+ }
+
+ return 0;
+}
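+
+/*
+ * Sketch of the resulting MIO_EMM_TIMING layout (tap values
+ * illustrative, assuming a 64-tap range): in MMC_TIMING_MMC_DDR52
+ * with no prior tuning, cin = din = 32 (mid-range), cout = 20 and
+ * dout = cout / 2 = 10, so the four 6-bit tap counts are packed as
+ *
+ * slot->taps = FIELD_PREP(MIO_EMM_TIMING_CMD_IN, 32) |
+ * FIELD_PREP(MIO_EMM_TIMING_CMD_OUT, 20) |
+ * FIELD_PREP(MIO_EMM_TIMING_DATA_IN, 32) |
+ * FIELD_PREP(MIO_EMM_TIMING_DATA_OUT, 10);
+ *
+ * and written in a single 64-bit store by cvm_mmc_set_timing().
+ */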
+
static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
struct cvm_mmc_cr_type *cr;
@@ -175,14 +329,14 @@ static void check_switch_errors(struct cvm_mmc_host *host)
dev_err(host->dev, "Switch bus width error\n");
}
-static void clear_bus_id(u64 *reg)
+static inline void clear_bus_id(u64 *reg)
{
u64 bus_id_mask = GENMASK_ULL(61, 60);
*reg &= ~bus_id_mask;
}
-static void set_bus_id(u64 *reg, int bus_id)
+static inline void set_bus_id(u64 *reg, int bus_id)
{
clear_bus_id(reg);
*reg |= FIELD_PREP(GENMASK(61, 60), bus_id);
@@ -193,25 +347,69 @@ static int get_bus_id(u64 reg)
return FIELD_GET(GENMASK_ULL(61, 60), reg);
}
-/*
- * We never set the switch_exe bit since that would interfere
- * with the commands send by the MMC core.
- */
-static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
+/* save old slot details, switch power */
+static bool pre_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
- int retries = 100;
- u64 rsp_sts;
- int bus_id;
+ int bus_id = get_bus_id(emm_switch);
+ struct cvm_mmc_slot *slot = host->slot[bus_id];
+ struct cvm_mmc_slot *old_slot;
+ bool same_vqmmc = false;
- /*
- * Modes setting only taken from slot 0. Work around that hardware
- * issue by first switching to slot 0.
+ if (host->last_slot == bus_id)
+ return false;
+
+ /* when VQMMC is switched, tri-state CMDn across any slot change
+ * to avoid transient states on D0-7 or CLK through the level-shifters
*/
- bus_id = get_bus_id(emm_switch);
- clear_bus_id(&emm_switch);
- writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
+ if (host->use_vqmmc) {
+ writeq(1ull << 3, host->base + MIO_EMM_CFG(host));
+ udelay(10);
+ }
+
+ if (host->last_slot >= 0 && host->slot[host->last_slot]) {
+ old_slot = host->slot[host->last_slot];
+ old_slot->cached_switch =
+ readq(host->base + MIO_EMM_SWITCH(host));
+ old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
+
+ same_vqmmc = (slot->mmc->supply.vqmmc ==
+ old_slot->mmc->supply.vqmmc);
+ if (!same_vqmmc && !IS_ERR_OR_NULL(old_slot->mmc->supply.vqmmc))
+ regulator_disable(old_slot->mmc->supply.vqmmc);
+ }
+
+ if (!same_vqmmc && !IS_ERR_OR_NULL(slot->mmc->supply.vqmmc)) {
+ int e = regulator_enable(slot->mmc->supply.vqmmc);
+
+ if (e)
+ dev_err(host->dev, "mmc-slot@%d.vqmmc err %d\n",
+ bus_id, e);
+ }
+
+ host->last_slot = slot->bus_id;
+
+ return true;
+}
+
+static void post_switch(struct cvm_mmc_host *host, u64 emm_switch)
+{
+ int bus_id = get_bus_id(emm_switch);
+ struct cvm_mmc_slot *slot = host->slot[bus_id];
+
+ if (host->use_vqmmc) {
+ /* enable new CMDn */
+ writeq(1ull << bus_id, host->base + MIO_EMM_CFG(host));
+ udelay(10);
+ }
+
+ writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
+}
+
+static inline void mode_switch(struct cvm_mmc_host *host, u64 emm_switch)
+{
+ u64 rsp_sts;
+ int retries = 100;
- set_bus_id(&emm_switch, bus_id);
writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
/* wait for the switch to finish */
@@ -221,15 +419,49 @@ static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
break;
udelay(10);
} while (--retries);
+}
+
+/*
+ * We never set the switch_exe bit since that would interfere
+ * with the commands sent by the MMC core.
+ */
+static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
+{
+ int bus_id = get_bus_id(emm_switch);
+ struct cvm_mmc_slot *slot = host->slot[bus_id];
+ bool slot_changed = pre_switch(host, emm_switch);
+
+ /*
+ * Modes setting only taken from slot 0. Work around that hardware
+ * issue by first switching to slot 0.
+ */
+ if (bus_id) {
+ u64 switch0 = emm_switch;
+
+ clear_bus_id(&switch0);
+ mode_switch(host, switch0);
+ }
+
+ mode_switch(host, emm_switch);
check_switch_errors(host);
+ if (slot_changed)
+ post_switch(host, emm_switch);
+ slot->cached_switch = emm_switch;
+ if (emm_switch & MIO_EMM_SWITCH_CLK)
+ slot->cmd6_pending = false;
}
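+
+/*
+ * Sketch of the full sequence performed above when crossing slots:
+ *
+ * pre_switch() - save the old slot's SWITCH/RCA, tri-state CMDn
+ * and swap VQMMC regulators if in use
+ * mode_switch(slot 0) - write the mode bits via slot 0 first, to
+ * work around the hardware issue
+ * mode_switch(target) - then switch to the real bus_id
+ * post_switch() - re-enable CMDn for the new slot and restore
+ * its cached RCA
+ */
+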
+/* does the hardware state need to change to match the requested settings? */
static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
/* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
- u64 match = 0x3001070fffffffffull;
+ /* For 9xxx add HS200_TIMING and HS400_TIMING */
+ u64 match = (is_mmc_otx2(slot->host)) ?
+ 0x3007070fffffffffull : 0x3001070fffffffffull;
+ if (!slot->host->powered)
+ return true;
return (slot->cached_switch & match) != (new_val & match);
}
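+
+/*
+ * Decode of the match masks (field positions assumed from cavium.h):
+ * bits 61:60 BUS_ID, 50:48 HS400/HS200/HS_TIMING (the 9xxx mask adds
+ * the upper two), 42:40 BUS_WIDTH, 35:32 POWER_CLASS, 31:16 CLK_HI
+ * and 15:0 CLK_LO - hence 0x3007070fffffffff vs 0x3001070fffffffff.
+ */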
@@ -247,58 +479,62 @@ static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}
+static void emmc_io_drive_setup(struct cvm_mmc_slot *slot)
+{
+ u64 ioctl_cfg;
+ struct cvm_mmc_host *host = slot->host;
+
+ /* Setup drive and slew only for 9x */
+ if (is_mmc_otx2(host)) {
+ if ((slot->drive < 0) || (slot->slew < 0))
+ return;
+ /* Set up the eMMC interface current drive
+ * strength and clk slew rate.
+ */
+ ioctl_cfg = FIELD_PREP(MIO_EMM_IO_CTL_DRIVE, slot->drive) |
+ FIELD_PREP(MIO_EMM_IO_CTL_SLEW, slot->slew);
+ writeq(ioctl_cfg, host->base + MIO_EMM_IO_CTL(host));
+ }
+}
+
static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
struct cvm_mmc_host *host = slot->host;
u64 emm_switch, wdog;
- emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
- emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
- MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
+ emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
+ emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERRS);
set_bus_id(&emm_switch, slot->bus_id);
- wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
- do_switch(slot->host, emm_switch);
-
- slot->cached_switch = emm_switch;
+ wdog = readq(host->base + MIO_EMM_WDOG(host));
+ do_switch(host, emm_switch);
+ host->powered = true;
msleep(20);
- writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
+ writeq(wdog, host->base + MIO_EMM_WDOG(host));
}
/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
struct cvm_mmc_host *host = slot->host;
- struct cvm_mmc_slot *old_slot;
- u64 emm_sample, emm_switch;
if (slot->bus_id == host->last_slot)
return;
- if (host->last_slot >= 0 && host->slot[host->last_slot]) {
- old_slot = host->slot[host->last_slot];
- old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
- old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
- }
-
- writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
- emm_switch = slot->cached_switch;
- set_bus_id(&emm_switch, slot->bus_id);
- do_switch(host, emm_switch);
-
- emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
- FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
- writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));
+ do_switch(host, slot->cached_switch);
+ host->powered = true;
- host->last_slot = slot->bus_id;
+ emmc_io_drive_setup(slot);
+ cvm_mmc_configure_delay(slot);
}
-static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
+static void do_read(struct cvm_mmc_slot *slot, struct mmc_request *req,
u64 dbuf)
{
- struct sg_mapping_iter *smi = &host->smi;
+ struct cvm_mmc_host *host = slot->host;
+ struct sg_mapping_iter *smi = &slot->smi;
int data_len = req->data->blocks * req->data->blksz;
int bytes_xfered, shift = -1;
u64 dat = 0;
@@ -365,7 +601,7 @@ static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
}
}
-static int get_dma_dir(struct mmc_data *data)
+static inline int get_dma_dir(struct mmc_data *data)
{
return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
@@ -374,6 +610,9 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
+
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
return 1;
}
@@ -382,6 +621,7 @@ static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
u64 fifo_cfg;
int count;
+ void __iomem *dma_intp = host->dma_base + MIO_EMM_DMA_INT(host);
/* Check if there are any pending requests left */
fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
@@ -392,8 +632,16 @@ static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
- /* Clear and disable FIFO */
- writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+
+ /* on read, wait for the internal buffer to flush out to memory */
+ if (get_dma_dir(data) == DMA_FROM_DEVICE) {
+ while (!(readq(dma_intp) & MIO_EMM_DMA_INT_DMA))
+ udelay(10);
+ writeq(MIO_EMM_DMA_INT_DMA, dma_intp);
+ }
+
dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
return 1;
}
@@ -415,7 +663,8 @@ static int check_status(u64 rsp_sts)
if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
return -ETIMEDOUT;
- if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
+ if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR ||
+ rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
return -EIO;
return 0;
}
@@ -435,16 +684,24 @@ static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
struct cvm_mmc_host *host = dev_id;
- struct mmc_request *req;
+ struct mmc_request *req = NULL;
+ struct cvm_mmc_slot *slot = NULL;
unsigned long flags = 0;
u64 emm_int, rsp_sts;
bool host_done;
+ int bus_id;
if (host->need_irq_handler_lock)
spin_lock_irqsave(&host->irq_handler_lock, flags);
else
__acquire(&host->irq_handler_lock);
+ rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
+ bus_id = get_bus_id(rsp_sts);
+ slot = host->slot[bus_id];
+ if (slot)
+ req = slot->current_req;
+
/* Clear interrupt bits (write 1 clears). */
emm_int = readq(host->base + MIO_EMM_INT(host));
writeq(emm_int, host->base + MIO_EMM_INT(host));
@@ -452,25 +709,32 @@ irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
if (emm_int & MIO_EMM_INT_SWITCH_ERR)
check_switch_errors(host);
- req = host->current_req;
if (!req)
goto out;
- rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
+ /*
+ * DMA_PEND set means DMA has stalled on CRC errors.
+ * Start the teardown; we get an interrupt on completion
+ * and the mmc stack retries.
+ */
+ if ((rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) && slot->dma_active) {
+ cleanup_dma(host, rsp_sts);
+ goto out;
+ }
+
/*
* dma_val set means DMA is still in progress. Don't touch
* the request and wait for the interrupt indicating that
* the DMA is finished.
*/
- if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
+ if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && slot->dma_active)
goto out;
- if (!host->dma_active && req->data &&
+ if (!slot->dma_active && req->data &&
(emm_int & MIO_EMM_INT_BUF_DONE)) {
unsigned int type = (rsp_sts >> 7) & 3;
if (type == 1)
- do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
+ do_read(slot, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
else if (type == 2)
do_write(req);
}
@@ -480,12 +744,16 @@ irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
emm_int & MIO_EMM_INT_CMD_ERR ||
emm_int & MIO_EMM_INT_DMA_ERR;
+ /* Add NCB_FLT interrupt for OcteonTX2 */
+ if (is_mmc_otx2(host))
+ host_done = host_done || emm_int & MIO_EMM_INT_NCB_FLT;
+
if (!(host_done && req->done))
goto no_req_done;
req->cmd->error = check_status(rsp_sts);
- if (host->dma_active && req->data)
+ if (slot->dma_active && req->data)
if (!finish_dma(host, req->data))
goto no_req_done;
@@ -494,7 +762,18 @@ irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
(rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
cleanup_dma(host, rsp_sts);
- host->current_req = NULL;
+ /* follow CMD6 timing/width with an immediate switch */
+ if (slot && slot->cmd6_pending) {
+ if (host_done && !req->cmd->error) {
+ do_switch(host, slot->want_switch);
+ emmc_io_drive_setup(slot);
+ cvm_mmc_configure_delay(slot);
+ } else {
+ slot->cmd6_pending = false;
+ }
+ }
+
+ slot->current_req = NULL;
req->done(req);
no_req_done:
@@ -609,9 +888,9 @@ static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
error:
WARN_ON_ONCE(1);
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
- /* Disable FIFO */
- writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
return 0;
}
@@ -653,7 +932,11 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
struct cvm_mmc_slot *slot = mmc_priv(mmc);
struct cvm_mmc_host *host = slot->host;
struct mmc_data *data;
- u64 emm_dma, addr;
+ u64 emm_dma, addr, int_enable_mask = 0;
+ int seg;
+
+ /* cleared by successful termination */
+ mrq->cmd->error = -EINVAL;
if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
!mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
@@ -662,17 +945,27 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
goto error;
}
+ /* unaligned multi-block DMA has problems, so forbid all unaligned */
+ for (seg = 0; seg < mrq->data->sg_len; seg++) {
+ struct scatterlist *sg = &mrq->data->sg[seg];
+ u64 align = (sg->offset | sg->length);
+
+ if (!(align & 7))
+ continue;
+ dev_info(&mmc->card->dev,
+ "Error: 64-bit alignment required\n");
+ goto error;
+ }
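+ /*
+ * Note on the test above: (offset | length) has a low bit set
+ * iff either value is misaligned, so one AND against 7 checks
+ * both against the 8-byte requirement; e.g. offset 0x100 with
+ * length 0x200 passes, offset 0x102 or length 0x1fc fails.
+ */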
+
cvm_mmc_switch_to(slot);
data = mrq->data;
+
pr_debug("DMA request blocks: %d block_size: %d total_size: %d\n",
data->blocks, data->blksz, data->blocks * data->blksz);
if (data->timeout_ns)
set_wdog(slot, data->timeout_ns);
- WARN_ON(host->current_req);
- host->current_req = mrq;
-
emm_dma = prepare_ext_dma(mmc, mrq);
addr = prepare_dma(host, data);
if (!addr) {
@@ -680,9 +973,19 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
goto error;
}
- host->dma_active = true;
- host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
- MIO_EMM_INT_DMA_ERR);
+ mrq->host = mmc;
+ WARN_ON(slot->current_req);
+ slot->current_req = mrq;
+ slot->dma_active = true;
+
+ int_enable_mask = MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
+ MIO_EMM_INT_DMA_ERR;
+
+ /* Add NCB_FLT interrupt for OcteonTX2 */
+ if (is_mmc_otx2(host))
+ int_enable_mask |= MIO_EMM_INT_NCB_FLT;
+
+ host->int_enable(host, int_enable_mask);
if (host->dmar_fixup)
host->dmar_fixup(host, mrq->cmd, data, addr);
@@ -700,22 +1003,22 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
return;
error:
- mrq->cmd->error = -EINVAL;
if (mrq->done)
mrq->done(mrq);
host->release_bus(host);
}
-static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
+static void do_read_request(struct cvm_mmc_slot *slot, struct mmc_request *mrq)
{
- sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
+ sg_miter_start(&slot->smi, mrq->data->sg, mrq->data->sg_len,
SG_MITER_ATOMIC | SG_MITER_TO_SG);
}
-static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
+static void do_write_request(struct cvm_mmc_slot *slot, struct mmc_request *mrq)
{
+ struct cvm_mmc_host *host = slot->host;
unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
- struct sg_mapping_iter *smi = &host->smi;
+ struct sg_mapping_iter *smi = &slot->smi;
unsigned int bytes_xfered;
int shift = 56;
u64 dat = 0;
@@ -749,6 +1052,51 @@ static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
sg_miter_stop(smi);
}
+static void cvm_mmc_track_switch(struct cvm_mmc_slot *slot, u32 cmd_arg)
+{
+ u8 how = (cmd_arg >> 24) & 3;
+ u8 where = (u8)(cmd_arg >> 16);
+ u8 val = (u8)(cmd_arg >> 8);
+
+ slot->want_switch = slot->cached_switch;
+
+ /*
+ * Track ext_csd assignments (how == 3) for critical entries,
+ * to make sure we follow up with a MIO_EMM_SWITCH adjustment
+ * before ANY mmc/core interaction at the old settings.
+ * Current mmc/core logic (linux 4.14) does not set/clear
+ * individual bits (how == 1 or 2), which would require more
+ * complex logic to track the intent of a change.
+ */
+
+ if (how != 3)
+ return;
+
+ switch (where) {
+ case EXT_CSD_BUS_WIDTH:
+ slot->want_switch &= ~MIO_EMM_SWITCH_BUS_WIDTH;
+ slot->want_switch |=
+ FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, val);
+ break;
+ case EXT_CSD_POWER_CLASS:
+ slot->want_switch &= ~MIO_EMM_SWITCH_POWER_CLASS;
+ slot->want_switch |=
+ FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, val);
+ break;
+ case EXT_CSD_HS_TIMING:
+ slot->want_switch &= ~MIO_EMM_SWITCH_TIMING;
+ if (val)
+ slot->want_switch |=
+ FIELD_PREP(MIO_EMM_SWITCH_TIMING,
+ (1 << (val - 1)));
+ break;
+ default:
+ return;
+ }
+
+ slot->cmd6_pending = true;
+}
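+
+/*
+ * CMD6 argument layout per the eMMC spec: [25:24] access mode
+ * ("how"), [23:16] EXT_CSD byte index, [15:8] value, [7:0] cmd set.
+ * Illustrative example: arg 0x03b70201 is a write-byte (3) of value
+ * 2 (8-bit bus) to EXT_CSD_BUS_WIDTH (183 = 0xb7), so want_switch
+ * gets its BUS_WIDTH field set to 2, applied by do_switch() once
+ * the CMD6 completes.
+ */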
+
static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct cvm_mmc_slot *slot = mmc_priv(mmc);
@@ -777,23 +1125,27 @@ static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
mods = cvm_mmc_get_cr_mods(cmd);
- WARN_ON(host->current_req);
- host->current_req = mrq;
+ WARN_ON(slot->current_req);
+ mrq->host = mmc;
+ slot->current_req = mrq;
if (cmd->data) {
if (cmd->data->flags & MMC_DATA_READ)
- do_read_request(host, mrq);
+ do_read_request(slot, mrq);
else
- do_write_request(host, mrq);
+ do_write_request(slot, mrq);
if (cmd->data->timeout_ns)
set_wdog(slot, cmd->data->timeout_ns);
} else
set_wdog(slot, 0);
- host->dma_active = false;
+ slot->dma_active = false;
host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);
+ if (cmd->opcode == MMC_SWITCH)
+ cvm_mmc_track_switch(slot, cmd->arg);
+
emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
@@ -819,37 +1171,257 @@ retry:
if (!retries)
dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
+ if (cmd->opcode == MMC_SWITCH)
+ udelay(1300);
+}
+
+static void cvm_mmc_wait_done(struct mmc_request *cvm_mrq)
+{
+ complete(&cvm_mrq->completion);
+}
+
+static int cvm_mmc_r1_cmd(struct mmc_host *mmc, u32 *statp, u32 opcode)
+{
+ static struct mmc_command cmd = {};
+ static struct mmc_request cvm_mrq = {};
+
+ if (!opcode)
+ opcode = MMC_SEND_STATUS;
+ cmd.opcode = opcode;
+ if (mmc->card)
+ cmd.arg = mmc->card->rca << 16;
+ else
+ cmd.arg = 1 << 16;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+ cmd.data = NULL;
+ cvm_mrq.cmd = &cmd;
+
+ init_completion(&cvm_mrq.completion);
+ cvm_mrq.done = cvm_mmc_wait_done;
+
+ cvm_mmc_request(mmc, &cvm_mrq);
+ if (!wait_for_completion_timeout(&cvm_mrq.completion,
+ msecs_to_jiffies(10))) {
+ mmc_abort_tuning(mmc, opcode);
+ return -ETIMEDOUT;
+ }
+
+ if (statp)
+ *statp = cmd.resp[0];
+
+ return cvm_mrq.cmd->error;
+}
+
+static int cvm_mmc_data_tuning(struct mmc_host *mmc, u32 *statp, u32 opcode)
+{
+ int err = 0;
+ u8 *ext_csd;
+ static struct mmc_command cmd = {};
+ static struct mmc_data data = {};
+ static struct mmc_request cvm_mrq = {};
+ static struct scatterlist sg;
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct mmc_card *card = mmc->card;
+
+ if (!(slot->cached_switch & MIO_EMM_SWITCH_HS400_TIMING)) {
+ int edetail = -EINVAL;
+ int core_opinion;
+
+ core_opinion =
+ mmc_send_tuning(mmc, opcode, &edetail);
+
+ /* only accept mmc/core opinion when it's happy */
+ if (!core_opinion)
+ return core_opinion;
+ }
+
+ /* EXT_CSD supported only after ver 3 */
+ if (card && card->csd.mmca_vsn <= CSD_SPEC_VER_3)
+ return -EOPNOTSUPP;
+ /*
+ * As the ext_csd is so large and mostly unused, we don't store the
+ * raw block in mmc_card.
+ */
+ ext_csd = kzalloc(BLKSZ_EXT_CSD, GFP_KERNEL);
+ if (!ext_csd)
+ return -ENOMEM;
+
+ cvm_mrq.cmd = &cmd;
+ cvm_mrq.data = &data;
+ cmd.data = &data;
+
+ cmd.opcode = MMC_SEND_EXT_CSD;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = BLKSZ_EXT_CSD;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, ext_csd, BLKSZ_EXT_CSD);
+
+ /* set timeout */
+ if (card) {
+ /* SD cards use a 100 multiplier rather than 10 */
+ u32 mult = mmc_card_sd(card) ? 100 : 10;
+
+ data.timeout_ns = card->csd.taac_ns * mult;
+ data.timeout_clks = card->csd.taac_clks * mult;
+ } else {
+ data.timeout_ns = 50 * NSEC_PER_MSEC;
+ }
+
+ init_completion(&cvm_mrq.completion);
+ cvm_mrq.done = cvm_mmc_wait_done;
+
+ cvm_mmc_request(mmc, &cvm_mrq);
+ if (!wait_for_completion_timeout(&cvm_mrq.completion,
+ msecs_to_jiffies(100))) {
+ mmc_abort_tuning(mmc, cmd.opcode);
+ err = -ETIMEDOUT;
+ }
+
+ data.sg_len = 0; /* FIXME: catch over-time completions? */
+ kfree(ext_csd);
+
+ if (err)
+ return err;
+
+ if (statp)
+ *statp = cvm_mrq.cmd->resp[0];
+
+ return cvm_mrq.cmd->error;
+}
+
+/* adjusters for the 4 otx2 delay line taps */
+struct adj {
+ const char *name;
+ u64 mask;
+ int (*test)(struct mmc_host *mmc, u32 *statp, u32 opcode);
+ u32 opcode;
+ bool ddr_only;
+};
+
+static int adjust_tuning(struct mmc_host *mmc, struct adj *adj, u32 opcode)
+{
+ int err, start_run = -1, best_run = 0, best_start = -1;
+ int last_good = -1;
+ bool prev_ok = false;
+ u64 timing, tap;
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct cvm_mmc_host *host = slot->host;
+ char how[MAX_NO_OF_TAPS+1] = "";
+
+ /* loop over range+1 to simplify processing */
+ for (tap = 0; tap <= MAX_NO_OF_TAPS; tap++, prev_ok = !err) {
+ if (tap < MAX_NO_OF_TAPS) {
+ timing = readq(host->base + MIO_EMM_TIMING(host));
+ timing &= ~adj->mask;
+ timing |= (tap << __bf_shf(adj->mask));
+ writeq(timing, host->base + MIO_EMM_TIMING(host));
+
+ err = adj->test(mmc, NULL, opcode);
+
+ how[tap] = "-+"[!err];
+ if (!err)
+ last_good = tap;
+ } else {
+ /*
+ * putting the end+1 case in loop simplifies
+ * logic, allowing 'prev_ok' to process a
+ * sweet spot in tuning which extends to wall.
+ */
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ /*
+ * If no CRC/etc errors in response, but previous
+ * failed, note the start of a new run
+ */
+ if (!prev_ok)
+ start_run = tap;
+ } else if (prev_ok) {
+ int run = tap - 1 - start_run;
+
+ /* did we just exit a wider sweet spot? */
+ if (start_run >= 0 && run > best_run) {
+ best_start = start_run;
+ best_run = run;
+ }
+ }
+ }
+
+ if (best_start < 0) {
+ dev_warn(host->dev, "%s %lldMHz tuning %s failed\n",
+ mmc_hostname(mmc), slot->clock / 1000000, adj->name);
+ return -EINVAL;
+ }
+
+ tap = best_start + best_run / 2;
+ how[tap] = '@';
+ if (tapdance) {
+ tap = last_good - tapdance;
+ how[tap] = 'X';
+ }
+ dev_dbg(host->dev, "%s/%s %d/%lld/%d %s\n",
+ mmc_hostname(mmc), adj->name,
+ best_start, tap, best_start + best_run,
+ how);
+ slot->taps &= ~adj->mask;
+ slot->taps |= (tap << __bf_shf(adj->mask));
+ cvm_mmc_set_timing(slot);
+ return 0;
+}
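+
+/*
+ * Worked scan (illustrative): if taps 4..15 pass and all others
+ * fail, how[] reads "----++++++++++++----...", start_run = 4 and,
+ * at the first failure (tap 16), run = 16 - 1 - 4 = 11; the
+ * selected tap is then 4 + 11/2 = 9, roughly mid-eye (marked '@').
+ * A nonzero tapdance instead picks the Nth tap below the last
+ * passing one (marked 'X').
+ */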
+
+static u32 max_supported_frequency(struct cvm_mmc_host *host)
+{
+ /* Default maximum frequency is 52 MHz for chips prior to 9xxx */
+ u32 max_frequency = MHZ_52;
+
+ if (is_mmc_otx2(host)) {
+ /* Default max frequency is 200MHz for 9X chips */
+ max_frequency = MHZ_200;
+
+ /* Erratum is only applicable to pass A0 */
+ if (is_mmc_otx2_A0(host))
+ max_frequency = MHZ_100;
+ }
+ return max_frequency;
}
static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct cvm_mmc_slot *slot = mmc_priv(mmc);
struct cvm_mmc_host *host = slot->host;
int clk_period = 0, power_class = 10, bus_width = 0;
- u64 clock, emm_switch;
+ u64 clock, emm_switch, mode;
+ u32 max_f;
+
+ if (ios->power_mode == MMC_POWER_OFF) {
+ if (host->powered) {
+ cvm_mmc_reset_bus(slot);
+ if (host->global_pwr_gpiod)
+ host->set_shared_power(host, 0);
+ else if (!IS_ERR_OR_NULL(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ host->powered = false;
+ }
+ set_wdog(slot, 0);
+ return;
+ }
host->acquire_bus(host);
cvm_mmc_switch_to(slot);
- /* Set the power state */
- switch (ios->power_mode) {
- case MMC_POWER_ON:
- break;
-
- case MMC_POWER_OFF:
- cvm_mmc_reset_bus(slot);
- if (host->global_pwr_gpiod)
- host->set_shared_power(host, 0);
- else if (!IS_ERR(mmc->supply.vmmc))
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
- break;
-
- case MMC_POWER_UP:
+ if (ios->power_mode == MMC_POWER_UP) {
if (host->global_pwr_gpiod)
host->set_shared_power(host, 1);
- else if (!IS_ERR(mmc->supply.vmmc))
+ else if (!IS_ERR_OR_NULL(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
- break;
}
/* Convert bus width to HW definition */
@@ -866,41 +1438,201 @@ static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
/* DDR is available for 4/8 bit bus width */
- if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
- bus_width |= 4;
+ switch (ios->timing) {
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ if (ios->bus_width)
+ bus_width |= 4;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ if (ios->bus_width & 2)
+ bus_width |= 4;
+ break;
+ }
/* Change the clock frequency. */
clock = ios->clock;
- if (clock > 52000000)
- clock = 52000000;
+ max_f = max_supported_frequency(host);
+
+ if (clock < mmc->f_min)
+ clock = mmc->f_min;
+ if (clock > max_f)
+ clock = max_f;
+
slot->clock = clock;
- if (clock)
- clk_period = (host->sys_freq + clock - 1) / (2 * clock);
+ if (clock) {
+ clk_period = host->sys_freq / (2 * clock);
+ /* do not exceed the requested speed */
+ while (1) {
+ int hz = host->sys_freq / (2 * clk_period);
- emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
- (ios->timing == MMC_TIMING_MMC_HS)) |
+ if (hz <= clock)
+ break;
+ clk_period++;
+ }
+ }
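+ /*
+ * The loop above always rounds the divisor up so the bus
+ * never runs faster than requested. Worked example
+ * (illustrative): sys_freq = 700 MHz and a requested 52 MHz
+ * give 700 / (2 * 52) = 6, but 700 / (2 * 6) = 58.3 MHz
+ * exceeds 52 MHz, so clk_period is bumped to 7 for 50 MHz.
+ */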
+
+ emm_switch =
FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
+ switch (ios->timing) {
+ case MMC_TIMING_LEGACY:
+ break;
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR12:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING, 1);
+ break;
+ case MMC_TIMING_MMC_HS200:
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_HS200_TIMING, 1);
+ break;
+ case MMC_TIMING_MMC_HS400:
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_HS400_TIMING, 1);
+ break;
+ }
set_bus_id(&emm_switch, slot->bus_id);
+ pr_debug("mmc-slot%d trying switch %llx w%lld hs%lld hs200:%lld hs400:%lld\n",
+ slot->bus_id, emm_switch,
+ FIELD_GET(MIO_EMM_SWITCH_BUS_WIDTH, emm_switch),
+ FIELD_GET(MIO_EMM_SWITCH_HS_TIMING, emm_switch),
+ FIELD_GET(MIO_EMM_SWITCH_HS200_TIMING, emm_switch),
+ FIELD_GET(MIO_EMM_SWITCH_HS400_TIMING, emm_switch));
+
if (!switch_val_changed(slot, emm_switch))
goto out;
set_wdog(slot, 0);
do_switch(host, emm_switch);
+
+ mode = readq(host->base + MIO_EMM_MODE(host, slot->bus_id));
+ pr_debug("mmc-slot%d mode %llx w%lld hs%lld hs200:%lld hs400:%lld\n",
+ slot->bus_id, mode,
+ (mode >> 40) & 7, (mode >> 48) & 1,
+ (mode >> 49) & 1, (mode >> 50) & 1);
+
slot->cached_switch = emm_switch;
+ host->powered = true;
+ cvm_mmc_configure_delay(slot);
out:
host->release_bus(host);
}
+static struct adj adj[] = {
+ { "CMD_IN", MIO_EMM_TIMING_CMD_IN,
+ cvm_mmc_r1_cmd, MMC_SEND_STATUS, },
+ { "DATA_IN", MIO_EMM_TIMING_DATA_IN,
+ cvm_mmc_data_tuning, },
+ { NULL, },
+};
+
+static int cvm_scan_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct adj *a;
+ int ret;
+
+ for (a = adj; a->name; a++) {
+ if (a->ddr_only && !cvm_is_mmc_timing_ddr(slot))
+ continue;
+
+ ret = adjust_tuning(mmc, a,
+ a->opcode ?: opcode);
+
+ if (ret)
+ return ret;
+ }
+
+ cvm_mmc_set_timing(slot);
+ return 0;
+}
+
+static int cvm_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct cvm_mmc_host *host = slot->host;
+ int clk_period, hz;
+
+ int ret;
+
+ do {
+ u64 emm_switch =
+ readq(host->base + MIO_EMM_MODE(host, slot->bus_id));
+
+ clk_period = FIELD_GET(MIO_EMM_SWITCH_CLK_LO, emm_switch);
+ dev_info(slot->host->dev, "%s re-tuning\n",
+ mmc_hostname(mmc));
+ ret = cvm_scan_tuning(mmc, opcode);
+ if (ret) {
+ int inc = clk_period >> 3;
+
+ if (!inc)
+ inc++;
+ clk_period += inc;
+ hz = host->sys_freq / (2 * clk_period);
+ pr_debug("clk_period %d += %d, now %d Hz\n",
+ clk_period - inc, inc, hz);
+
+ if (hz < 400000)
+ break;
+
+ slot->clock = hz;
+ mmc->ios.clock = hz;
+
+ emm_switch &= ~MIO_EMM_SWITCH_CLK_LO;
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
+ clk_period);
+ emm_switch &= ~MIO_EMM_SWITCH_CLK_HI;
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
+ clk_period);
+ do_switch(host, emm_switch);
+ }
+ } while (ret);
+
+ return ret;
+}
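+
+/*
+ * Back-off example (illustrative): with clk_period = 16 a failed
+ * scan retries at 16 + 16/8 = 18; at clk_period <= 7 the increment
+ * bottoms out at 1. Retuning stops once the derived clock would
+ * fall below 400 kHz.
+ */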
+
+static int cvm_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+
+ return cvm_mmc_configure_delay(slot);
+}
+
+static void cvm_mmc_reset(struct mmc_host *mmc)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct cvm_mmc_host *host = slot->host;
+ u64 r;
+
+ cvm_mmc_reset_bus(slot);
+
+ r = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
+ FIELD_PREP(MIO_EMM_CMD_BUS_ID, slot->bus_id);
+
+ writeq(r, host->base + MIO_EMM_CMD(host));
+
+ do {
+ r = readq(host->base + MIO_EMM_RSP_STS(host));
+ } while (!(r & MIO_EMM_RSP_STS_CMD_DONE));
+}
+
static const struct mmc_host_ops cvm_mmc_ops = {
.request = cvm_mmc_request,
.set_ios = cvm_mmc_set_ios,
.get_ro = mmc_gpio_get_ro,
.get_cd = mmc_gpio_get_cd,
+ .hw_reset = cvm_mmc_reset,
+ .execute_tuning = cvm_execute_tuning,
+ .prepare_hs400_tuning = cvm_prepare_hs400_tuning,
};
static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
@@ -917,7 +1649,7 @@ static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
struct cvm_mmc_host *host = slot->host;
u64 emm_switch;
- /* Enable this bus slot. */
+ /* Enable this bus slot. Overridden when VQMMC switching is engaged */
host->emm_cfg |= (1ull << slot->bus_id);
writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
udelay(10);
@@ -933,8 +1665,8 @@ static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
/* Make the changes take effect on this bus slot. */
set_bus_id(&emm_switch, slot->bus_id);
do_switch(host, emm_switch);
-
slot->cached_switch = emm_switch;
+ host->powered = true;
/*
* Set watchdog timeout value and default reset value
@@ -953,7 +1685,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
struct device_node *node = dev->of_node;
struct mmc_host *mmc = slot->mmc;
- u64 clock_period;
+ u32 max_frequency, current_drive, clk_slew;
int ret;
ret = of_property_read_u32(node, "reg", &id);
@@ -962,8 +1694,14 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
return ret;
}
- if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
- dev_err(dev, "Invalid reg property on %pOF\n", node);
+ if (id >= CAVIUM_MAX_MMC) {
+ dev_err(dev, "Invalid reg=<%d> property on %pOF\n", id, node);
+ return -EINVAL;
+ }
+
+ if (slot->host->slot[id]) {
+ dev_err(dev, "Duplicate reg=<%d> property on %pOF\n",
+ id, node);
return -EINVAL;
}
@@ -974,7 +1712,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
* Legacy Octeon firmware has no regulator entry, fall-back to
* a hard-coded voltage to get a sane OCR.
*/
- if (IS_ERR(mmc->supply.vmmc))
+ if (IS_ERR_OR_NULL(mmc->supply.vmmc))
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/* Common MMC bindings */
@@ -982,7 +1720,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
if (ret)
return ret;
- /* Set bus width */
+ /* Set bus width from obsolete properties, if unset */
if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
if (bus_width == 8)
@@ -991,19 +1729,40 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
mmc->caps |= MMC_CAP_4_BIT_DATA;
}
+ max_frequency = max_supported_frequency(slot->host);
+
/* Set maximum and minimum frequency */
if (!mmc->f_max)
of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
- if (!mmc->f_max || mmc->f_max > 52000000)
- mmc->f_max = 52000000;
- mmc->f_min = 400000;
+ if (!mmc->f_max || mmc->f_max > max_frequency)
+ mmc->f_max = max_frequency;
+ mmc->f_min = KHZ_400;
/* Sampling register settings, period in picoseconds */
- clock_period = 1000000000000ull / slot->host->sys_freq;
of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
- slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
- slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;
+ if (is_mmc_8xxx(slot->host) || is_mmc_otx2(slot->host)) {
+ slot->cmd_cnt = cmd_skew;
+ slot->data_cnt = dat_skew;
+ } else {
+ u64 clock_period = 1000000000000ull / slot->host->sys_freq;
+
+ slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
+ slot->data_cnt = (dat_skew + clock_period / 2) / clock_period;
+ }
+
+ /* Get current drive and clk skew */
+ ret = of_property_read_u32(node, "cavium,drv-strength", &current_drive);
+ if (ret)
+ slot->drive = -1;
+ else
+ slot->drive = current_drive;
+
+ ret = of_property_read_u32(node, "cavium,clk-slew", &clk_slew);
+ if (ret)
+ slot->slew = -1;
+ else
+ slot->slew = clk_slew;
return id;
}
@@ -1012,6 +1771,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
struct cvm_mmc_slot *slot;
struct mmc_host *mmc;
+ struct iommu_domain *dom;
int ret, id;
mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
@@ -1030,16 +1790,19 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
/* Set up host parameters */
mmc->ops = &cvm_mmc_ops;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_BUS_WIDTH_TEST;
+ mmc->caps |= MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD;
+
/*
- * We only have a 3.3v supply, we cannot support any
- * of the UHS modes. We do support the high speed DDR
- * modes up to 52MHz.
+ * We only have a 3.3V supply for the slots, so we cannot
+ * support any of the UHS modes. We do support the
+ * high-speed DDR modes up to 52 MHz.
*
* Disable bounce buffers for max_segs = 1
*/
- mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
- MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
- MMC_CAP_3_3V_DDR;
+
+ if (!is_mmc_otx2(host))
+ mmc->caps |= MMC_CAP_3_3V_DDR;
if (host->use_sg)
mmc->max_segs = 16;
@@ -1055,14 +1818,30 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
/* DMA block count field is 15 bits */
mmc->max_blk_count = 32767;
+ dom = iommu_get_domain_for_dev(dev->parent);
+ if (dom && dom->type == IOMMU_DOMAIN_IDENTITY) {
+ unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
+
+ if (mmc->max_seg_size > max_size)
+ mmc->max_seg_size = max_size;
+
+ max_size *= mmc->max_segs;
+
+ if (mmc->max_req_size > max_size)
+ mmc->max_req_size = max_size;
+ }
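+ /*
+ * With the usual swiotlb constants (IO_TLB_SHIFT = 11,
+ * IO_TLB_SEGSIZE = 128), max_size above is 2 KiB * 128 =
+ * 256 KiB per segment, and at most 256 KiB * max_segs per
+ * request - keeping any bounce-buffered identity-domain DMA
+ * within what a single swiotlb slot run can satisfy.
+ */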
+
+ mmc_can_retune(mmc);
+
slot->clock = mmc->f_min;
slot->bus_id = id;
slot->cached_rca = 1;
host->acquire_bus(host);
host->slot[id] = slot;
- cvm_mmc_switch_to(slot);
+ host->use_vqmmc |= !IS_ERR_OR_NULL(slot->mmc->supply.vqmmc);
cvm_mmc_init_lowlevel(slot);
+ cvm_mmc_switch_to(slot);
host->release_bus(host);
ret = mmc_add_host(mmc);