ASR_BASE
Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/mmc/host/sdhci-asr.c b/marvell/linux/drivers/mmc/host/sdhci-asr.c
new file mode 100644
index 0000000..a24949d
--- /dev/null
+++ b/marvell/linux/drivers/mmc/host/sdhci-asr.c
@@ -0,0 +1,1419 @@
+/*
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ * Kevin Wang <dwang4@marvell.com>
+ * Mingwei Wang <mwwang@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ * Mark Brown <markb@marvell.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/clk/mmp.h>
+#include <linux/crc32.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/mmc/asr_dvfs.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_data/asr_sdhci.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+#define ASR_RPM_DELAY_MS 50
+
+#define SDHC_OP_CTRL 0x104
+#define SDHC_OP_EXT_REG 0x108
+#define INT_CLK_GATE_MASK (0x3<<8)
+#define OVRRD_CLK_OEN 0x0800
+#define FORCE_CLK_ON 0x1000
+
+#define SDHC_LEGACY_CTRL_REG 0x10C
+#define GEN_PAD_CLK_ON (0x1 << 6)
+
+#define SDHC_LEGACY_CEATA_REG 0x110
+#define SDHC_MMC_CTRL_REG 0x114
+#define MISC_INT_EN 0x0002
+#define MISC_INT 0x0004
+#define ENHANCE_STROBE_EN 0x0100
+#define MMC_HS400 0x0200
+#define MMC_HS200 0x0400
+#define MMC_CARD_MODE 0x1000
+
+#define SDHC_RX_CFG_REG 0x118
+#define RX_SDCLK_SEL0_MASK 0x3
+#define RX_SDCLK_SEL0_SHIFT 0
+#define RX_SDCLK_SEL1_MASK 0x3
+#define RX_SDCLK_SEL1_SHIFT 2
+#define RX_SDCLK_SEL1_PAD 0x0
+#define RX_SDCLK_SEL1_DDLL 0x01
+#define RX_SDCLK_SEL1_INTERNAL 0x02
+
+#define SDHC_TX_CFG_REG 0x11C
+#define TX_DLINE_SRC_SEL (0x1 << 29)
+#define TX_INT_CLK_SEL (0x1 << 30)
+#define TX_MUX_SEL (0x1 << 31)
+
+#define SDHC_HWTUNE_CFG_REG 0x120
+#define SDHC_HWTUNE_CFG2_REG 0x124
+#define SDHC_ROUNDTRIP_TIMING_REG 0x128
+#define WRDATA_WAIT_CYCLES_MASK 0xF
+#define WRDATA_WAIT_CYCLES_SHIFT 16
+
+#define SDHC_GPIO_CFG_REG 0x12C
+
+#define SDHC_DLINE_CTRL_REG 0x130
+#define DLINE_PU 0x01
+#define RX_DLINE_CODE_MASK 0xFF
+#define RX_DLINE_CODE_SHIFT 0x10
+#define TX_DLINE_CODE_MASK 0xFF
+#define TX_DLINE_CODE_SHIFT 0x18
+
+#define SDHC_DLINE_CFG_REG 0x134
+#define RX_DLINE_REG_MASK 0xFF
+#define RX_DLINE_REG_SHIFT 0x00
+#define RX_DLINE_RSTB_MASK 0x1
+#define RX_DLINE_RSTB_SHIFT 7
+#define RX_DLINE_GAIN_MASK 0x1
+#define RX_DLINE_GAIN_SHIFT 0x8
+#define RX_DLINE_GAIN 0x1
+#define TX_DLINE_REG_MASK 0xFF
+#define TX_DLINE_REG_SHIFT 0x10
+#define TX_DLINE_RSTB_MASK 0x1
+#define TX_DLINE_RSTB_SHIFT 23
+
+#define SDHC_RX_TUNE_DELAY_MIN 0x0
+#define SDHC_RX_TUNE_DELAY_MAX 0xFF
+#define SDHC_RX_TUNE_DELAY_STEP 0x1
+
+#define AIB_MMC1_IO_REG 0xD401E81C
+#define APBC_ASFAR 0xD4015050
+#define AKEY_ASFAR 0xbaba
+#define AKEY_ASSAR 0xeb10
+#define MMC1_PAD_1V8 (0x1 << 2)
+
+/* Driver-private state, stored in the sdhci_pltfm private area. */
+struct sdhci_asr {
+	struct clk *clk_core;		/* optional core clock ("sdh-core") */
+	struct clk *clk_io;		/* I/O clock ("sdh-io"); also pltfm_host->clk */
+	u8 clk_enable;			/* clock enable flag (not used in this file) */
+	u8 power_mode;			/* last observed MMC_POWER_* state */
+	unsigned int tx_dly_val;	/* cached TX delay code (not used in this file) */
+	unsigned int rx_dly_val;	/* cached RX delay code (not used in this file) */
+};
+
+/*
+ * Reference tuning-block contents for a 4-bit bus, as 32-bit words read
+ * from the data FIFO; compared word-for-word in asr_tuning_pio_check().
+ */
+static const u32 tuning_patten4[16] = {
+	0x00ff0fff, 0xccc3ccff, 0xffcc3cc3, 0xeffefffe,
+	0xddffdfff, 0xfbfffbff, 0xff7fffbf, 0xefbdf777,
+	0xf0fff0ff, 0x3cccfc0f, 0xcfcc33cc, 0xeeffefff,
+	0xfdfffdff, 0xffbfffdf, 0xfff7ffbb, 0xde7b7ff7,
+};
+
+/* Reference tuning-block contents for an 8-bit bus (see tuning_patten4). */
+static const u32 tuning_patten8[32] = {
+	0xff00ffff, 0x0000ffff, 0xccccffff, 0xcccc33cc,
+	0xcc3333cc, 0xffffcccc, 0xffffeeff, 0xffeeeeff,
+	0xffddffff, 0xddddffff, 0xbbffffff, 0xbbffffff,
+	0xffffffbb, 0xffffff77, 0x77ff7777, 0xffeeddbb,
+	0x00ffffff, 0x00ffffff, 0xccffff00, 0xcc33cccc,
+	0x3333cccc, 0xffcccccc, 0xffeeffff, 0xeeeeffff,
+	0xddffffff, 0xddffffff, 0xffffffdd, 0xffffffbb,
+	0xffffbbbb, 0xffff77ff, 0xff7777ff, 0xeeddbb77,
+};
+
+/*
+ * Prepare the RX delay line for software tuning: program its reference
+ * register value, release the RX delay-line reset, power the line up,
+ * and route the RX sample clock through the delay line (DDLL) output.
+ */
+static void asr_sw_rx_tuning_prepare(struct sdhci_host *host, u8 dline_reg)
+{
+	u32 reg;
+
+	reg = sdhci_readl(host, SDHC_DLINE_CFG_REG);
+	reg &= ~(RX_DLINE_REG_MASK << RX_DLINE_REG_SHIFT);
+	reg |= dline_reg << RX_DLINE_REG_SHIFT;
+	/* release RX reset signal */
+	reg |= 0x1 << RX_DLINE_RSTB_SHIFT;
+	sdhci_writel(host, reg, SDHC_DLINE_CFG_REG);
+
+	/* power up the delay line */
+	reg = sdhci_readl(host, SDHC_DLINE_CTRL_REG);
+	reg |= DLINE_PU;
+	sdhci_writel(host, reg, SDHC_DLINE_CTRL_REG);
+
+	/* sample RX data with the delay-line clock instead of the pad clock */
+	reg = sdhci_readl(host, SDHC_RX_CFG_REG);
+	reg &= ~(RX_SDCLK_SEL1_MASK << RX_SDCLK_SEL1_SHIFT);
+	reg |= RX_SDCLK_SEL1_DDLL << RX_SDCLK_SEL1_SHIFT;
+	sdhci_writel(host, reg, SDHC_RX_CFG_REG);
+}
+
+/* Write @delay (8-bit code) into the RX delay-line code field. */
+static void asr_sw_rx_set_delaycode(struct sdhci_host *host, u32 delay)
+{
+	u32 reg;
+
+	reg = sdhci_readl(host, SDHC_DLINE_CTRL_REG);
+	reg &= ~(RX_DLINE_CODE_MASK << RX_DLINE_CODE_SHIFT);
+	reg |= (delay & RX_DLINE_CODE_MASK) << RX_DLINE_CODE_SHIFT;
+	sdhci_writel(host, reg, SDHC_DLINE_CTRL_REG);
+}
+
+/* Disable TX tuning: clear TX_MUX_SEL so TX bypasses the delay line. */
+static void asr_sw_tx_no_tuning(struct sdhci_host *host)
+{
+	u32 reg;
+
+	/* clear TX_MUX_SEL (the original comment said "set" but the code clears) */
+	reg = sdhci_readl(host, SDHC_TX_CFG_REG);
+	reg &= ~TX_MUX_SEL;
+	sdhci_writel(host, reg, SDHC_TX_CFG_REG);
+}
+
+/*
+ * Prepare TX for tuning: route TX through the delay line (TX_MUX_SEL)
+ * and power the delay line up.
+ */
+static void asr_sw_tx_tuning_prepare(struct sdhci_host *host)
+{
+	u32 reg;
+
+	/* set TX_MUX_SEL */
+	reg = sdhci_readl(host, SDHC_TX_CFG_REG);
+	reg |= TX_MUX_SEL;
+	sdhci_writel(host, reg, SDHC_TX_CFG_REG);
+
+	/* power up the delay line */
+	reg = sdhci_readl(host, SDHC_DLINE_CTRL_REG);
+	reg |= DLINE_PU;
+	sdhci_writel(host, reg, SDHC_DLINE_CTRL_REG);
+}
+
+/* Program the TX delay-line reference register and release the TX reset. */
+static void asr_sw_tx_set_dlinereg(struct sdhci_host *host, u8 dline_reg)
+{
+	u32 reg;
+
+	reg = sdhci_readl(host, SDHC_DLINE_CFG_REG);
+	reg &= ~(TX_DLINE_REG_MASK << TX_DLINE_REG_SHIFT);
+	reg |= dline_reg << TX_DLINE_REG_SHIFT;
+	/* release TX reset signal */
+	reg |= 0x1 << TX_DLINE_RSTB_SHIFT;
+	sdhci_writel(host, reg, SDHC_DLINE_CFG_REG);
+}
+
+/* Write @delay (8-bit code) into the TX delay-line code field. */
+static void asr_sw_tx_set_delaycode(struct sdhci_host *host, u32 delay)
+{
+	u32 reg;
+
+	reg = sdhci_readl(host, SDHC_DLINE_CTRL_REG);
+	reg &= ~(TX_DLINE_CODE_MASK << TX_DLINE_CODE_SHIFT);
+	reg |= (delay & TX_DLINE_CODE_MASK) << TX_DLINE_CODE_SHIFT;
+	sdhci_writel(host, reg, SDHC_DLINE_CTRL_REG);
+}
+
+/*
+ * Atomically-in-effect update the interrupt enables: drop the bits in
+ * @clr, raise the bits in @set, and mirror the result into the signal
+ * enable register so the same events raise IRQs.
+ */
+static void asr_sdhci_clear_set_irqs(struct sdhci_host *host, u32 clr, u32 set)
+{
+	u32 mask;
+
+	mask = sdhci_readl(host, SDHCI_INT_ENABLE);
+	mask = (mask & ~clr) | set;
+	sdhci_writel(host, mask, SDHCI_INT_ENABLE);
+	sdhci_writel(host, mask, SDHCI_SIGNAL_ENABLE);
+}
+
+/*
+ * Sample RX data directly with the pad clock (delay line bypassed).
+ * Used for low bus clocks or when no RX delay is configured.
+ */
+static void asr_select_rx_pad_clk(struct sdhci_host *host)
+{
+	u32 tmp_reg = 0;
+
+	tmp_reg = sdhci_readl(host, SDHC_RX_CFG_REG);
+	tmp_reg &= ~(RX_SDCLK_SEL0_MASK << RX_SDCLK_SEL0_SHIFT);
+	tmp_reg &= ~(RX_SDCLK_SEL1_MASK << RX_SDCLK_SEL1_SHIFT);
+	tmp_reg |= RX_SDCLK_SEL1_PAD << RX_SDCLK_SEL1_SHIFT;
+	sdhci_writel(host, tmp_reg, SDHC_RX_CFG_REG);
+
+	/*
+	 * The data CRC status response delay needs to be 3 cycles for some
+	 * SDIO WiFi chips (e.g. Hi2825), violating the SDIO spec.  Apply the
+	 * fix from the host side.
+	 *
+	 * Should not use the code below for other SDIO WiFi chips.
+	 */
+	if (host->quirks2 & SDHCI_QUIRK2_LONG_DATA_CRC_STATUS) {
+		tmp_reg = sdhci_readl(host, SDHC_ROUNDTRIP_TIMING_REG);
+		tmp_reg &= ~(WRDATA_WAIT_CYCLES_MASK << WRDATA_WAIT_CYCLES_SHIFT);
+		tmp_reg |= 0x4 << WRDATA_WAIT_CYCLES_SHIFT;
+		sdhci_writel(host, tmp_reg, SDHC_ROUNDTRIP_TIMING_REG);
+	}
+}
+
+/*
+ * Apply the RX timing for the current bus timing mode using the per-timing
+ * table in platform data.
+ *
+ * Returns 1 when the RX delay line was programmed, 0 when the pad clock is
+ * used instead (no platform data, unknown timing, slow clock or no delay).
+ */
+static int asr_set_rx_timing_cfg(struct sdhci_host *host,
+		struct asr_sdhci_platdata *pdata,
+		unsigned int clock)
+{
+	unsigned char timing = host->mmc->ios.timing;
+	struct asr_sdhci_dtr_data *dtr_data;
+
+	if (!pdata || !pdata->dtr_data)
+		return 0;
+
+	if (timing > MMC_TIMING_MMC_HS400) {
+		pr_err("%s: invalid timing %d\n",
+			mmc_hostname(host->mmc), timing);
+		return 0;
+	}
+
+	/* table entries are only valid when they carry their own timing id */
+	dtr_data = &pdata->dtr_data[timing];
+	if (timing != dtr_data->timing)
+		return 0;
+
+	/* slow clocks do not need a tuned sample point */
+	if (clock <= 26000000 || !dtr_data->rx_delay) {
+		asr_select_rx_pad_clk(host);
+		return 0;
+	}
+
+	asr_sw_rx_tuning_prepare(host, dtr_data->rx_dline_reg);
+	asr_sw_rx_set_delaycode(host, dtr_data->rx_delay);
+	return 1;
+}
+
+/*
+ * Apply the TX timing for the current bus timing mode: either program the
+ * TX delay line from the per-timing table, or bypass it and choose the
+ * internal-clock inversion depending on the speed mode.
+ */
+static void asr_set_tx_timing_cfg(struct sdhci_host *host,
+		struct asr_sdhci_platdata *pdata)
+{
+	unsigned char timing = host->mmc->ios.timing;
+	struct asr_sdhci_dtr_data *dtr_data;
+	u32 tmp_reg = 0;
+
+	if (!pdata || !pdata->dtr_data)
+		return;
+
+	if (timing > MMC_TIMING_MMC_HS400) {
+		pr_err("%s: invalid timing %d\n", mmc_hostname(host->mmc),
+			timing);
+		return;
+	}
+
+	/* table entries are only valid when they carry their own timing id */
+	dtr_data = &pdata->dtr_data[timing];
+	if (timing != dtr_data->timing)
+		return;
+
+	/* set Tx delay */
+	if (dtr_data->tx_delay) {
+		asr_sw_tx_set_dlinereg(host, dtr_data->tx_dline_reg);
+		asr_sw_tx_set_delaycode(host, dtr_data->tx_delay);
+		asr_sw_tx_tuning_prepare(host);
+	} else {
+		asr_sw_tx_no_tuning(host);
+
+		/*
+		 * For default or high speed mode, enable TX_INT_CLK_SEL
+		 * to select clock from inverter of internal work clock.
+		 * This setting will guarantee the hold time.
+		 */
+		tmp_reg = sdhci_readl(host, SDHC_TX_CFG_REG);
+		if (timing <= MMC_TIMING_UHS_SDR50)
+			tmp_reg |= TX_INT_CLK_SEL;
+		else
+			tmp_reg &= ~TX_INT_CLK_SEL;
+		/* board quirk may force the inverted internal clock */
+		if (host->quirks2 & SDHCI_QUIRK2_TX_INT_CLOCK)
+			tmp_reg |= TX_INT_CLK_SEL;
+		sdhci_writel(host, tmp_reg, SDHC_TX_CFG_REG);
+	}
+}
+
+#define SLOW_CLOCK 52000000
+#define FAST_CLOCK 100000000
+/*
+ * sdhci .set_clock callback: reprogram TX/RX timing for the new clock and
+ * switch pinctrl drive-strength states when the clock crosses the
+ * SLOW_CLOCK / FAST_CLOCK thresholds, then let the sdhci core set the rate.
+ *
+ * Fix: every sibling helper tolerates a NULL platform_data, but the
+ * pinctrl selection below dereferenced pdata unconditionally; guard it.
+ */
+static void asr_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+
+	if (clock == 0)
+		return;
+
+	asr_set_tx_timing_cfg(host, pdata);
+	asr_set_rx_timing_cfg(host, pdata, clock);
+
+	/*
+	 * Configure pin state like drive strength according to bus clock.
+	 * 1. Use slow setting when new bus clock < FAST_CLOCK while
+	 *    current >= FAST_CLOCK.
+	 * 2. Use fast setting when new bus clock >= FAST_CLOCK while
+	 *    current < FAST_CLOCK.
+	 */
+	if (pdata) {
+		if (clock <= SLOW_CLOCK) {
+			if ((host->clock > SLOW_CLOCK) && (!IS_ERR(pdata->pin_slow)))
+				pinctrl_select_state(pdata->pinctrl, pdata->pin_slow);
+		} else if (clock < FAST_CLOCK) {
+			if ((host->clock >= FAST_CLOCK) && (!IS_ERR(pdata->pin_default)))
+				pinctrl_select_state(pdata->pinctrl, pdata->pin_default);
+		} else {
+			if ((host->clock < FAST_CLOCK) && (!IS_ERR(pdata->pin_fast)))
+				pinctrl_select_state(pdata->pinctrl, pdata->pin_fast);
+		}
+	}
+
+	sdhci_set_clock(host, clock);
+}
+
+/*
+ * sdhci .clk_prepare callback: choose the controller clock rate for the
+ * current timing mode.  Sets the source (parent) clock from the per-timing
+ * table and returns the rate the sdhci core should program; returns @rate
+ * unchanged when no table data applies.
+ */
+static unsigned long asr_clk_prepare(struct sdhci_host *host,
+		unsigned long rate)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	unsigned char timing = host->mmc->ios.timing;
+	struct asr_sdhci_dtr_data *dtr_data;
+	unsigned long preset_rate = 0, src_rate = 0;
+
+	if (!pdata || !pdata->dtr_data || !rate)
+		return rate;
+
+	if (timing > MMC_TIMING_MMC_HS400) {
+		pr_err("%s: invalid timing %d\n",
+			mmc_hostname(host->mmc), timing);
+		return rate;
+	}
+
+	/* table entries are only valid when they carry their own timing id */
+	dtr_data = &pdata->dtr_data[timing];
+	if (timing != dtr_data->timing)
+		return rate;
+
+	/* honor the requested rate during legacy-mode enumeration */
+	if ((MMC_TIMING_LEGACY == timing) && (rate < 25000000))
+		preset_rate = rate;
+	else {
+		/* some SDIO cards change frequency at runtime; follow the request */
+		if (host->quirks2 & SDHCI_QUIRK2_CHANGE_SDIO_CLOCK_FREQ_DYNAMIC)
+			preset_rate = rate;
+		else
+			preset_rate = dtr_data->preset_rate;
+	}
+
+	src_rate = dtr_data->src_rate;
+	clk_set_rate(pltfm_host->clk, src_rate);
+	return preset_rate;
+}
+
+/* Route a delay code to the TX (@tx != 0) or RX (@tx == 0) delay line. */
+static void asr_set_delaycode(struct sdhci_host *host, int tx, u32 delay)
+{
+	if (!tx) {
+		asr_sw_rx_set_delaycode(host, delay);
+		return;
+	}
+	asr_sw_tx_set_delaycode(host, delay);
+}
+
+/*
+ * sdhci .enable_delay_line callback: enable (program from the per-timing
+ * table) or disable the TX or RX delay line for the current timing mode.
+ */
+static void asr_enable_delay_line(struct sdhci_host *host, int tx, int enable)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+	unsigned char timing = host->mmc->ios.timing;
+	struct asr_sdhci_dtr_data *dtr_data;
+
+	if (!pdata || !pdata->dtr_data)
+		return;
+
+	if (timing > MMC_TIMING_MMC_HS400) {
+		pr_err("%s: invalid timing %d\n", mmc_hostname(host->mmc),
+			timing);
+		return;
+	}
+
+	/* table entries are only valid when they carry their own timing id */
+	dtr_data = &pdata->dtr_data[timing];
+	if (timing != dtr_data->timing)
+		return;
+
+	if (tx) {
+		if (enable) {
+			asr_sw_tx_set_dlinereg(host, dtr_data->tx_dline_reg);
+			asr_sw_tx_set_delaycode(host, dtr_data->tx_delay);
+			asr_sw_tx_tuning_prepare(host);
+		} else {
+			asr_sw_tx_no_tuning(host);
+		}
+	} else {
+		if (enable) {
+			asr_sw_rx_tuning_prepare(host, dtr_data->rx_dline_reg);
+			asr_sw_rx_set_delaycode(host, dtr_data->rx_delay);
+		} else {
+			/* fall back to direct pad-clock sampling */
+			asr_select_rx_pad_clk(host);
+		}
+	}
+}
+
+/*
+ * Enable (@ctrl != 0) or disable automatic internal clock gating.
+ * Disabling overrides the gate and forces the clock permanently on.
+ */
+static void asr_clk_gate_auto(struct sdhci_host *host, unsigned int ctrl)
+{
+	u32 val = sdhci_readl(host, SDHC_OP_EXT_REG);
+
+	if (ctrl)
+		val &= ~(OVRRD_CLK_OEN | FORCE_CLK_ON);
+	else
+		val |= OVRRD_CLK_OEN | FORCE_CLK_ON;
+
+	sdhci_writel(host, val, SDHC_OP_EXT_REG);
+}
+
+/*
+ * sdhci .reset callback: a full controller reset clears the vendor timing
+ * registers, so reprogram the TX/RX timing afterwards.
+ */
+static void asr_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+
+	sdhci_reset(host, mask);
+
+	if (mask != SDHCI_RESET_ALL) {
+		/* Return if not Reset All */
+		return;
+	}
+
+	/*
+	 * tune timing of read data/command when crc error happen
+	 * no performance impact
+	 */
+	asr_set_tx_timing_cfg(host, pdata);
+	asr_set_rx_timing_cfg(host, pdata, host->clock);
+}
+
+#define MAX_WAIT_COUNT 74
+/*
+ * sdhci .platform_send_init_74_clocks callback: on the power-up -> power-on
+ * transition, make the controller emit the 74 initialization clocks the
+ * card expects, and poll the MISC_INT flag until the burst completes.
+ */
+static void asr_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_asr *asr = sdhci_pltfm_priv(pltfm_host);
+	u32 tmp;
+	int count = 0;
+
+	if (asr->power_mode == MMC_POWER_UP && power_mode == MMC_POWER_ON) {
+		dev_dbg(mmc_dev(host->mmc),
+			"%s: slot->power_mode = %d,"
+			"ios->power_mode = %d\n",
+			__func__,
+			asr->power_mode,
+			power_mode);
+
+		/* clear the interrupt bit if posted and
+		 * set we want notice of when 74 clocks are sent
+		 */
+		tmp = sdhci_readl(host, SDHC_MMC_CTRL_REG);
+		tmp |= MISC_INT_EN;
+		sdhci_writel(host, tmp, SDHC_MMC_CTRL_REG);
+
+		/* start sending the 74 clocks */
+		tmp = sdhci_readl(host, SDHC_LEGACY_CTRL_REG);
+		tmp |= GEN_PAD_CLK_ON;
+		sdhci_writel(host, tmp, SDHC_LEGACY_CTRL_REG);
+
+		/* slowest speed is about 100KHz or 10usec per clock */
+		while (count++ < MAX_WAIT_COUNT) {
+			if (readw(host->ioaddr + SDHC_MMC_CTRL_REG)
+					& MISC_INT) {
+				break;
+			}
+			udelay(20);
+		}
+
+		if (count >= MAX_WAIT_COUNT)
+			dev_warn(mmc_dev(host->mmc),
+				 "74 clock interrupt not cleared\n");
+
+		/* MISC_INT is write-1-to-clear -- assumed, verify with IP spec */
+		tmp = sdhci_readl(host, SDHC_MMC_CTRL_REG);
+		tmp |= MISC_INT;
+		sdhci_writel(host, tmp, SDHC_MMC_CTRL_REG);
+	}
+
+	asr->power_mode = power_mode;
+}
+
+/*
+ * sdhci .set_uhs_signaling callback: the HS200/HS400 eMMC modes need a
+ * vendor-specific enable bit in addition to the standard UHS mode field.
+ */
+static void asr_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
+{
+	u16 reg;
+
+	if ((timing == MMC_TIMING_MMC_HS200) ||
+	    (timing == MMC_TIMING_MMC_HS400)) {
+		reg = sdhci_readw(host, SDHC_MMC_CTRL_REG);
+		reg |= (timing == MMC_TIMING_MMC_HS200) ? MMC_HS200 : MMC_HS400;
+		sdhci_writew(host, reg, SDHC_MMC_CTRL_REG);
+	}
+	sdhci_set_uhs_signaling(host, timing);
+}
+
+/*
+ * sdhci .set_power callback: drive the external vmmc/vqmmc regulators to
+ * follow the controller power state.  Skips regulator work when the
+ * controller power register did not actually change.
+ */
+static void asr_set_power(struct sdhci_host *host, unsigned char mode,
+		unsigned short vdd)
+{
+	struct mmc_host *mmc = host->mmc;
+	u8 pwr = host->pwr;
+
+	sdhci_set_power_noreg(host, mode, vdd);
+
+	if (host->pwr == pwr)
+		return;
+
+	/* power fully off: request 0 from the regulator as well */
+	if (host->pwr == 0)
+		vdd = 0;
+
+	if (!IS_ERR(mmc->supply.vmmc))
+		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+
+	if (mode == MMC_POWER_OFF)
+		mmc_regulator_disable_vqmmc(mmc);
+	else
+		mmc_regulator_enable_vqmmc(mmc);
+}
+
+/*
+ * Switch the MMC1 pad group between 3.3V and 1.8V signaling through the
+ * AIB register, which must be unlocked with the ASFAR/ASSAR key sequence
+ * before each access.
+ *
+ * Fix: the return values of ioremap() were used unchecked; a mapping
+ * failure would dereference NULL.  Bail out (and unmap) on failure.
+ */
+static void set_mmc1_aib(struct sdhci_host *host, int vol)
+{
+	u32 tmp;
+	void __iomem *aib_mmc1_io;
+	void __iomem *apbc_asfar;
+
+	aib_mmc1_io = ioremap(AIB_MMC1_IO_REG, 4);
+	if (!aib_mmc1_io)
+		return;
+
+	apbc_asfar = ioremap(APBC_ASFAR, 8);
+	if (!apbc_asfar) {
+		iounmap(aib_mmc1_io);
+		return;
+	}
+
+	/* unlock the AIB register, then read the current pad setting */
+	writel(AKEY_ASFAR, apbc_asfar);
+	writel(AKEY_ASSAR, apbc_asfar + 4);
+	tmp = readl(aib_mmc1_io);
+
+	if (vol >= 2800000)
+		tmp &= ~MMC1_PAD_1V8;
+	else
+		tmp |= MMC1_PAD_1V8;
+
+	/* unlock again for the write-back */
+	writel(AKEY_ASFAR, apbc_asfar);
+	writel(AKEY_ASSAR, apbc_asfar + 4);
+	writel(tmp, aib_mmc1_io);
+
+	iounmap(apbc_asfar);
+	iounmap(aib_mmc1_io);
+}
+
+/*
+ * Clear all wakeup sources (insert/remove/interrupt) in the controller and
+ * let the platform clear any latched wakeup event.
+ */
+static void asr_sdhci_disable_irq_wakeups(struct sdhci_host *host)
+{
+	u8 val;
+	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
+		| SDHCI_WAKE_ON_INT;
+
+	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+	val &= ~mask;
+	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+	if (host->ops->clr_wakeup_event)
+		host->ops->clr_wakeup_event(host);
+}
+
+/*
+ * sdhci .platform_handle_none_irq callback: if a spurious IRQ was in fact
+ * a platform wakeup event, disable the wakeup sources so it stops firing.
+ *
+ * Fix: every other pdata-consuming hook in this file guards against a
+ * NULL platform_data, but this one dereferenced it unconditionally.
+ */
+static void asr_handle_none_irq(struct sdhci_host *host)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+	int ret;
+
+	if (!pdata)
+		return;
+
+	if (pdata->check_sdh_wakeup_event) {
+		ret = pdata->check_sdh_wakeup_event();
+		if (ret)
+			asr_sdhci_disable_irq_wakeups(host);
+	}
+}
+
+/* Forward the reset-wakeup-event request to the board hook, if present. */
+static void asr_reset_wakeup_event(struct sdhci_host *host)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+
+	if (pdata && pdata->reset_wakeup_event)
+		pdata->reset_wakeup_event();
+}
+
+/* Forward the clear-wakeup-event request to the board hook, if present. */
+static void asr_clr_wakeup_event(struct sdhci_host *host)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+
+	if (pdata && pdata->clear_wakeup_event)
+		pdata->clear_wakeup_event();
+}
+
+/*
+ * sdhci .voltage_switch callback: when the board quirk requests it,
+ * mirror the MMC signal-voltage selection into the MMC1 AIB pad control.
+ * Unknown codes (and 3.3V) default to 3.3V pads.
+ */
+static void asr_signal_vol_change(struct sdhci_host *host)
+{
+	struct mmc_host *mmc = host->mmc;
+	struct platform_device *pdev = to_platform_device(mmc_dev(mmc));
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+	u8 vol = mmc->ios.signal_voltage;
+	unsigned int uv;
+
+	if (!pdata || !(pdata->quirks2 & SDHCI_QUIRK2_SET_AIB_MMC))
+		return;
+
+	/* translate the MMC_SIGNAL_VOLTAGE_* code into microvolts */
+	if (vol == MMC_SIGNAL_VOLTAGE_180)
+		uv = 1800000;
+	else if (vol == MMC_SIGNAL_VOLTAGE_120)
+		uv = 1200000;
+	else
+		uv = 3300000;	/* MMC_SIGNAL_VOLTAGE_330 and anything else */
+
+	set_mmc1_aib(host, uv);
+}
+
+/*
+ * Constrain (@ac != 0) or release CPU idle states around host activity via
+ * the PM QoS request owned by platform data.
+ */
+static void asr_access_constrain(struct sdhci_host *host, unsigned int ac)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+
+	if (!pdata)
+		return;
+
+	pm_qos_update_request(&pdata->qos_idle,
+			      ac ? pdata->lpm_qos
+				 : PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+}
+
+/*
+ * Apply RX delay code @val for a tuning attempt.  When @done is true the
+ * value is also stored back into the per-timing table so later timing
+ * reconfiguration reuses the tuned delay.
+ */
+static void asr_prepare_tuning(struct sdhci_host *host, u32 val, bool done)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+	unsigned char timing = host->mmc->ios.timing;
+	struct asr_sdhci_dtr_data *dtr_data;
+
+	if (pdata && pdata->dtr_data) {
+		if (timing <= MMC_TIMING_MMC_HS400) {
+			dtr_data = &pdata->dtr_data[timing];
+			if (timing == dtr_data->timing && done)
+				dtr_data->rx_delay = val;
+		}
+	}
+
+	asr_sw_rx_set_delaycode(host, val);
+	dev_dbg(mmc_dev(host->mmc), "tunning with delay 0x%x \n", val);
+}
+
+/*
+ * Compare the tuning block read back in PIO mode against the expected
+ * pattern for the current bus width.
+ *
+ * Always drains the whole FIFO even after a mismatch, to avoid errors if
+ * the IC design leaves data behind.
+ *
+ * Fixes: keep the pattern pointer const instead of casting away the
+ * const of the static tables, and use a signed index to match the signed
+ * length (avoids a signed/unsigned comparison).
+ *
+ * Return 0 on success, or the number of mismatching words (>= 1).
+ */
+static int asr_tuning_pio_check(struct sdhci_host *host, int point)
+{
+	u32 rd_patten;
+	const u32 *tuning_patten;
+	int patten_len;
+	int i;
+	int err = 0;
+
+	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
+		tuning_patten = tuning_patten8;
+		patten_len = ARRAY_SIZE(tuning_patten8);
+	} else {
+		tuning_patten = tuning_patten4;
+		patten_len = ARRAY_SIZE(tuning_patten4);
+	}
+
+	/* read all the data from FIFO, avoid error if IC design is not good */
+	for (i = 0; i < patten_len; i++) {
+		rd_patten = sdhci_readl(host, SDHCI_BUFFER);
+		if (rd_patten != tuning_patten[i])
+			err++;
+	}
+	dev_dbg(mmc_dev(host->mmc), "point: %d, error: %d\n", point, err);
+	return err;
+}
+
+/*
+ * Issue one tuning command (CMD19/CMD21) and check the returned block.
+ *
+ * Locking: called with host->lock held (irqsave'd by the caller with
+ * @flags).  The lock is dropped while waiting for the Buffer Read Ready
+ * interrupt and re-taken before returning.
+ * NOTE(review): @flags is the caller's irqsave cookie passed by value;
+ * the unlock/relock below relies on that convention -- confirm callers.
+ *
+ * Return 0 on success, >0 for pattern mismatches, -EIO on send failure
+ * or Buffer Read Ready timeout (after resetting CMD and DATA lines).
+ */
+static int asr_send_tuning_cmd(struct sdhci_host *host, u32 opcode,
+		int point, unsigned long flags)
+{
+	struct mmc_command cmd = { 0 };
+	struct mmc_request mrq = { NULL };
+	int err = 0;
+
+	cmd.opcode = opcode;
+	cmd.arg = 0;
+	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+	cmd.mrq = &mrq;
+	cmd.retries = 0;
+	cmd.data = NULL;
+	cmd.error = 0;
+
+	mrq.cmd = &cmd;
+
+	/* HS200 on an 8-bit bus uses a 128-byte tuning block, else 64 bytes */
+	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
+	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), SDHCI_BLOCK_SIZE);
+	else
+		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
+
+	/*
+	 * The tuning block is sent by the card to the host controller.
+	 * So we set the TRNS_READ bit in the Transfer Mode register.
+	 * This also takes care of setting DMA Enable and Multi Block
+	 * Select in the same register to 0.
+	 */
+	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
+
+	if (!sdhci_send_command_retry(host, &cmd, flags)) {
+		spin_unlock_irqrestore(&host->lock, flags);
+		host->tuning_done = 0;
+		return -EIO;
+	}
+
+	spin_unlock_irqrestore(&host->lock, flags);
+	/* Wait for Buffer Read Ready interrupt */
+	wait_event_timeout(host->buf_ready_int,
+			   (host->tuning_done > 0), msecs_to_jiffies(50));
+	spin_lock_irqsave(&host->lock, flags);
+
+	host->cmd = NULL;
+
+	sdhci_del_timer(host, &mrq);
+
+	if (host->tuning_done == 1) {
+		err = asr_tuning_pio_check(host, point);
+	} else {
+		pr_debug("%s: Timeout or error waiting for Buffer Read Ready interrupt"
+			 " during tuning procedure, resetting CMD and DATA\n",
+			 mmc_hostname(host->mmc));
+		sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
+		err = -EIO;
+	}
+
+	host->tuning_done = 0;
+	return err;
+}
+
+/*
+ * sdhci .platform_execute_tuning callback: software RX delay tuning.
+ *
+ * Scans the full RX delay-code range in PIO mode, records every passing
+ * window, then programs and verifies the centre of the widest window.
+ * Interrupts are temporarily switched to PIO-style events for the scan;
+ * asr_send_tuning_cmd() drops host->lock while waiting for data.
+ *
+ * Fix: pdata was dereferenced (pdata->dvfs_level_sel) before the NULL
+ * check that guards it a few lines below; guard the store as well.
+ *
+ * Return 0 on success or a negative error code from the final check.
+ */
+static int asr_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+	unsigned char timing = host->mmc->ios.timing;
+	struct asr_sdhci_dtr_data *dtr_data;
+	int min, max, ret;
+	int len = 0, avg = 0;
+	unsigned long flags = 0;
+	u32 ier, ier_new;
+	int dvfs_level;
+
+	/* tune at the highest DVFS level so the result holds at all levels */
+	dvfs_level = asr_sdh_get_highest_dvfs_level(host);
+	if (pdata)
+		pdata->dvfs_level_sel = dvfs_level;
+	asr_sdh_request_dvfs_level(host, dvfs_level);
+
+	if (pdata && pdata->dtr_data) {
+		if (timing <= MMC_TIMING_MMC_HS400) {
+			dtr_data = &pdata->dtr_data[timing];
+			asr_sw_rx_tuning_prepare(host, dtr_data->rx_dline_reg);
+		}
+	}
+
+	/* change to pio mode during the tuning stage */
+	spin_lock_irqsave(&host->lock, flags);
+	ier = sdhci_readl(host, SDHCI_INT_ENABLE);
+
+	ier_new = SDHCI_INT_DATA_AVAIL;
+	ier_new |= SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX;
+	ier_new |= SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT;
+	asr_sdhci_clear_set_irqs(host, ier, ier_new);
+
+	/* find the mininum delay first which can pass tuning */
+	min = SDHC_RX_TUNE_DELAY_MIN;
+	do {
+		while (min < SDHC_RX_TUNE_DELAY_MAX) {
+			asr_prepare_tuning(host, min, false);
+			if (!asr_send_tuning_cmd(host, opcode, min, flags))
+				break;
+			min += SDHC_RX_TUNE_DELAY_STEP;
+		}
+
+		/* find the maxinum delay which can not pass tuning */
+		max = min + SDHC_RX_TUNE_DELAY_STEP;
+		while (max < SDHC_RX_TUNE_DELAY_MAX) {
+			asr_prepare_tuning(host, max, false);
+			if (asr_send_tuning_cmd(host, opcode, max, flags))
+				break;
+			max += SDHC_RX_TUNE_DELAY_STEP;
+		}
+
+		/* remember the widest passing window seen so far */
+		if ((max - min) > len) {
+			len = max - min;
+			avg = (min + max - 1) / 2;
+		}
+		if ((max - min) > 20)
+			printk(KERN_DEBUG "%s: tuning pass window [%d : %d], len = %d\n",
+			       mmc_hostname(host->mmc), min,
+			       max - 1, max - min);
+		min = max + SDHC_RX_TUNE_DELAY_STEP;
+	} while (min < SDHC_RX_TUNE_DELAY_MAX);
+
+	/* program the centre of the widest window and verify it */
+	asr_prepare_tuning(host, avg, true);
+	ret = asr_send_tuning_cmd(host, opcode, avg, flags);
+
+	asr_sdhci_clear_set_irqs(host, ier_new, ier);
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	if (ret)
+		pr_err("%s: tunning failed at %d, pass window length is %d\n",
+		       mmc_hostname(host->mmc), avg, len);
+	else
+		printk(KERN_DEBUG "%s: tunning passed at %d, pass window length is %d\n",
+		       mmc_hostname(host->mmc), avg, len);
+	return ret;
+}
+
+/*
+ * Postpone the controller's automatic internal clock gating by writing the
+ * maximum value (0xf) into bits [19:16] of SDHC_OP_EXT_REG.
+ * NOTE(review): the meaning of bits [19:16] is assumed from this usage --
+ * confirm against the IP spec; also, pr_err is used for what looks like an
+ * informational trace.
+ * Exported so other modules (e.g. SDIO function drivers) can call it.
+ */
+void sdhci_postpone_clock_gate(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned int reg;
+	reg = sdhci_readl(host, SDHC_OP_EXT_REG);
+	reg |= (0xf<<16);
+	sdhci_writel(host, reg, SDHC_OP_EXT_REG);
+	pr_err("%s sdhci_postpone_clock_gate: read SDHC_OP_EXT_REG(0x%x) is 0x%x\n", mmc_hostname(host->mmc), SDHC_OP_EXT_REG, sdhci_readl(host, SDHC_OP_EXT_REG));
+}
+EXPORT_SYMBOL_GPL(sdhci_postpone_clock_gate);
+
+/*
+ * remove the caps that supported by the controller but not available
+ * for certain platforms.
+ */
+static void asr_host_caps_disable(struct sdhci_host *host)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+
+	/* NOTE(review): pdata is not NULL-checked here, unlike sibling hooks;
+	 * the probe path appears to allocate it, but confirm before relying on it.
+	 */
+	if (pdata->host_caps_disable)
+		host->mmc->caps &= ~(pdata->host_caps_disable);
+	if (pdata->host_caps2_disable)
+		host->mmc->caps2 &= ~(pdata->host_caps2_disable);
+}
+
+/*
+ * sdhci .hw_reset callback: pulse the card reset GPIO using the three
+ * DT-provided delays (pre-assert, assert width, post-release), honoring
+ * the configured active level.  No-op when no reset GPIO is configured.
+ */
+static void sdhci_asr_hw_reset(struct sdhci_host *host)
+{
+	u32 *delays_rst = host->delays_rst;
+	int rst_gpio = host->rst_gpio;
+	int low_active_rst = host->low_active_rst;
+
+	if (rst_gpio < 0)
+		return;
+
+	/* optional settle time in the asserted state before the pulse */
+	if (delays_rst[0]) {
+		gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);
+		usleep_range(delays_rst[0], delays_rst[0] + 100);
+	}
+
+	/* For eMMC, minimum is 1us but give it 9us for good measure */
+	gpio_set_value(rst_gpio, low_active_rst ? 0 : 1);
+	if (delays_rst[1])
+		udelay(delays_rst[1]);
+
+	gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);
+
+	/* For eMMC, minimum is 200us but give it 300us for good measure */
+	if (delays_rst[2])
+		usleep_range(delays_rst[2], delays_rst[2] + 100);
+}
+
+/* Dump all vendor-specific registers (same output as the historical code). */
+static void asr_dump_priv_regs(struct sdhci_host *host)
+{
+	static const struct {
+		const char *name;
+		int off;
+	} regs[] = {
+		{ "OP_CTRL",		SDHC_OP_CTRL },
+		{ "OP_EXT_REG",		SDHC_OP_EXT_REG },
+		{ "LEGACY_CTRL_REG",	SDHC_LEGACY_CTRL_REG },
+		{ "MMC_CTRL_REG",	SDHC_MMC_CTRL_REG },
+		{ "RX_CFG_REG",		SDHC_RX_CFG_REG },
+		{ "TX_CFG_REG",		SDHC_TX_CFG_REG },
+		{ "HWTUNE_CFG_REG",	SDHC_HWTUNE_CFG_REG },
+		{ "HWTUNE_CFG2_REG",	SDHC_HWTUNE_CFG2_REG },
+		{ "ROUNDTRIP_TIMING_REG", SDHC_ROUNDTRIP_TIMING_REG },
+		{ "GPIO_CFG_REG",	SDHC_GPIO_CFG_REG },
+		{ "DLINE_CTRL_REG",	SDHC_DLINE_CTRL_REG },
+		{ "DLINE_CFG_REG",	SDHC_DLINE_CFG_REG },
+	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(regs); i++)
+		printk(KERN_INFO "sdhci: %s: 0x%08x\n",
+		       regs[i].name, sdhci_readl(host, regs[i].off));
+}
+
+/* sdhci core callbacks wired to the ASR-specific implementations above. */
+static const struct sdhci_ops asr_sdhci_ops = {
+	.set_delay_val = asr_set_delaycode,
+	.enable_delay_line = asr_enable_delay_line,
+	.set_clock = asr_set_clock,
+	.set_power = asr_set_power,
+	.reset = asr_sdhci_reset,
+	.set_uhs_signaling = asr_set_uhs_signaling,
+	.platform_send_init_74_clocks = asr_gen_init_74_clocks,
+	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
+	.set_bus_width = sdhci_set_bus_width,
+	.dump_vendor_regs = asr_dump_priv_regs,
+	.clk_prepare = asr_clk_prepare,
+	.reset_wakeup_event = asr_reset_wakeup_event,
+	.clr_wakeup_event = asr_clr_wakeup_event,
+	.voltage_switch = asr_signal_vol_change,
+	.clk_gate_auto = asr_clk_gate_auto,
+	.platform_handle_none_irq = asr_handle_none_irq,
+	.platform_execute_tuning = asr_execute_tuning,
+	.host_caps_disable = asr_host_caps_disable,
+	.hw_reset = sdhci_asr_hw_reset,
+};
+
+/* Platform data: controller quirks common to all ASR SDHCI instances. */
+static struct sdhci_pltfm_data sdhci_asr_pdata = {
+	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
+		| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
+		| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+	.ops = &asr_sdhci_ops,
+};
+
+/*
+ * Merge board-specific quirks and capability flags from @pdata into the
+ * host.  Always succeeds (returns 0); the int return is kept for the
+ * existing call sites.
+ */
+static int asr_init_host_with_pdata(struct sdhci_host *host,
+		struct asr_sdhci_platdata *pdata)
+{
+	struct mmc_host *mmc = host->mmc;
+
+	/* R1B commands need the busy signal tracked by the core */
+	mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
+	if (!(pdata->flags & PXA_FLAG_DISABLE_CLOCK_AUTO_GATING))
+		mmc->caps2 |= MMC_CAP2_BUS_AUTO_CLK_GATE;
+
+	/* OR-ing zero-valued fields is a no-op, so merge unconditionally */
+	host->quirks |= pdata->quirks;
+	host->quirks2 |= pdata->quirks2;
+	mmc->caps |= pdata->host_caps;
+	mmc->caps2 |= pdata->host_caps2;
+	mmc->pm_caps |= pdata->pm_caps;
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+/* Device-tree match table: binds this driver to "asr,sdhci" nodes. */
+static const struct of_device_id sdhci_asr_of_match[] = {
+	{
+		.compatible = "asr,sdhci",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, sdhci_asr_of_match);
+
+/*
+ * Parse the ASR-specific device-tree properties into @pdata and configure
+ * the optional card reset GPIO on @host.  (Function name "perperty" kept
+ * as-is for compatibility with existing callers.)
+ *
+ * Fix: the fallback reset delays wrote delays_rst[3], one element past
+ * the end of the 3-entry array read with of_property_read_u32_array();
+ * the post-release delay belongs in delays_rst[2] (consumed by
+ * sdhci_asr_hw_reset()).
+ */
+static void asr_get_of_perperty(struct sdhci_host *host,
+		struct device *dev, struct asr_sdhci_platdata *pdata)
+{
+	struct device_node *np = dev->of_node;
+	struct asr_sdhci_dtr_data *dtr_data;
+	struct property *prop;
+	const __be32 *p;
+	u32 tmp, val, timing;
+	u32 *delays_rst = host->delays_rst;
+	int rst_gpio;
+
+	host->rst_gpio = -1;
+	rst_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+	if (rst_gpio >= 0) {
+		host->low_active_rst = of_property_read_bool(np, "reset-active-low");
+		if (of_property_read_u32_array(np, "reset-delays-us",
+					       delays_rst, 3)) {
+			/* defaults: no pre-delay, 10us pulse, 300us post-delay */
+			delays_rst[0] = 0;
+			delays_rst[1] = 10;
+			delays_rst[2] = 300;	/* was delays_rst[3]: out of bounds */
+		}
+
+		if (gpio_request(rst_gpio, "mmc-reset")) {
+			printk("%s: reset-gpio=%d request failed\n",
+			       mmc_hostname(host->mmc), rst_gpio);
+			return;
+		}
+
+		/* park the card in its non-reset state */
+		gpio_direction_output(rst_gpio, host->low_active_rst ? 1 : 0);
+
+		host->rst_gpio = rst_gpio;
+		host->mmc->caps |= MMC_CAP_HW_RESET;
+	}
+
+	if (!of_property_read_u32(np, "asr,sdh-flags", &tmp))
+		pdata->flags |= tmp;
+
+	of_property_read_u32(np, "asr,max-speed", &pdata->max_speed);
+
+	if (!of_property_read_u32(np, "asr,sdh-host-caps", &tmp))
+		pdata->host_caps |= tmp;
+	if (!of_property_read_u32(np, "asr,sdh-host-caps2", &tmp))
+		pdata->host_caps2 |= tmp;
+	if (!of_property_read_u32(np, "asr,sdh-host-caps-disable", &tmp))
+		pdata->host_caps_disable |= tmp;
+	if (!of_property_read_u32(np, "asr,sdh-host-caps2-disable", &tmp))
+		pdata->host_caps2_disable |= tmp;
+	if (!of_property_read_u32(np, "asr,sdh-quirks", &tmp))
+		pdata->quirks |= tmp;
+	if (!of_property_read_u32(np, "asr,sdh-quirks2", &tmp))
+		pdata->quirks2 |= tmp;
+	if (!of_property_read_u32(np, "asr,sdh-pm-caps", &tmp))
+		pdata->pm_caps |= tmp;
+	if (!of_property_read_u32(np, "lpm-qos", &tmp))
+		pdata->lpm_qos = tmp;
+	else
+		pdata->lpm_qos = PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE;
+
+	if (!of_property_read_u32(np, "asr,sdh-tuning-win-limit", &tmp))
+		pdata->tuning_win_limit = tmp;
+	else
+		pdata->tuning_win_limit = 100; /* default limit value */
+
+	/*
+	 * property "asr,sdh-dtr-data": <timing preset_rate src_rate tx_delay rx_delay>, [<..>]
+	 * allow to set clock related parameters.
+	 */
+	if (of_property_read_bool(np, "asr,sdh-dtr-data")) {
+		dtr_data = devm_kzalloc(dev,
+			(MMC_TIMING_MMC_HS400 + 1) * sizeof(struct asr_sdhci_dtr_data),
+			GFP_KERNEL);
+		if (!dtr_data) {
+			dev_err(dev, "failed to allocate memory for sdh-dtr-data\n");
+			return;
+		}
+		/* each record: timing, preset_rate, src_rate, tx_delay,
+		 * rx_delay, tx_dline_reg, rx_dline_reg
+		 */
+		of_property_for_each_u32(np, "asr,sdh-dtr-data", prop, p, timing) {
+			if (timing > MMC_TIMING_MMC_HS400) {
+				dev_err(dev, "invalid timing %d on sdh-dtr-data prop\n",
+					timing);
+				continue;
+			} else {
+				dtr_data[timing].timing = timing;
+			}
+			p = of_prop_next_u32(prop, p, &val);
+			if (!p) {
+				dev_err(dev, "missing preset_rate for timing %d\n",
+					timing);
+			} else {
+				dtr_data[timing].preset_rate = val;
+			}
+			p = of_prop_next_u32(prop, p, &val);
+			if (!p) {
+				dev_err(dev, "missing src_rate for timing %d\n",
+					timing);
+			} else {
+				dtr_data[timing].src_rate = val;
+			}
+			p = of_prop_next_u32(prop, p, &val);
+			if (!p) {
+				dev_err(dev, "missing tx_delay for timing %d\n",
+					timing);
+			} else {
+				dtr_data[timing].tx_delay = val;
+			}
+			p = of_prop_next_u32(prop, p, &val);
+			if (!p) {
+				dev_err(dev, "missing rx_delay for timing %d\n",
+					timing);
+			} else {
+				dtr_data[timing].rx_delay = val;
+			}
+			p = of_prop_next_u32(prop, p, &val);
+			if (!p) {
+				dev_err(dev, "missing tx_dline_reg for timing %d\n",
+					timing);
+			} else {
+				dtr_data[timing].tx_dline_reg = val;
+			}
+			p = of_prop_next_u32(prop, p, &val);
+			if (!p) {
+				dev_err(dev, "missing rx_dline_reg for timing %d\n",
+					timing);
+			} else {
+				dtr_data[timing].rx_dline_reg = val;
+			}
+		}
+		pdata->dtr_data = dtr_data;
+	}
+}
+#endif
+
+/*
+ * asr_sdhci_probe() - bind the ASR SDHCI controller.
+ *
+ * Acquires the io/core clocks, merges DT properties into platform data,
+ * configures optional pinctrl states, sets up DVFS and runtime PM, then
+ * registers the SDHCI host.  Returns 0 on success or a negative errno.
+ */
+static int asr_sdhci_probe(struct platform_device *pdev)
+{
+	struct sdhci_pltfm_host *pltfm_host;
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+	struct device *dev = &pdev->dev;
+	struct sdhci_host *host = NULL;
+	struct sdhci_asr *asr = NULL;
+	const struct of_device_id *match;
+	int ret = 0;
+
+	host = sdhci_pltfm_init(pdev, &sdhci_asr_pdata, sizeof(*asr));
+	if (IS_ERR(host))
+		return PTR_ERR(host);
+
+	pltfm_host = sdhci_priv(host);
+	asr = sdhci_pltfm_priv(pltfm_host);
+
+	/* "sdh-io" is optional; fall back to the unnamed default clock. */
+	asr->clk_io = devm_clk_get(dev, "sdh-io");
+	if (IS_ERR(asr->clk_io))
+		asr->clk_io = devm_clk_get(dev, NULL);
+	if (IS_ERR(asr->clk_io)) {
+		dev_err(dev, "failed to get io clock\n");
+		ret = PTR_ERR(asr->clk_io);
+		goto err_clk_get;
+	}
+	pltfm_host->clk = asr->clk_io;
+	clk_prepare_enable(asr->clk_io);
+
+	/* The core clock does not exist on every SoC; treat it as optional. */
+	asr->clk_core = devm_clk_get(dev, "sdh-core");
+	if (!IS_ERR(asr->clk_core))
+		clk_prepare_enable(asr->clk_core);
+
+	host->quirks2 = SDHCI_QUIRK2_TIMEOUT_DIVIDE_4
+		| SDHCI_QUIRK2_NO_CURRENT_LIMIT
+		| SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+
+	match = of_match_device(of_match_ptr(sdhci_asr_of_match), &pdev->dev);
+	if (match) {
+		/* mmc_of_parse() may return -EPROBE_DEFER; do not ignore it. */
+		ret = mmc_of_parse(host->mmc);
+		if (ret)
+			goto err_init_host;
+		sdhci_get_of_property(pdev);
+	}
+
+	if (!pdata) {
+		pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+		if (!pdata) {
+			dev_err(mmc_dev(host->mmc),
+				"failed to alloc pdata\n");
+			/* was missing: without this, probe returned 0 here */
+			ret = -ENOMEM;
+			goto err_init_host;
+		}
+		pdev->dev.platform_data = pdata;
+	}
+	asr_get_of_perperty(host, dev, pdata);
+
+	/*
+	 * Pin states are optional.  Only look states up when the pinctrl
+	 * handle is valid: pinctrl_lookup_state() must not be fed ERR_PTR.
+	 * Start from "slow" until the bus speed is negotiated.
+	 */
+	pdata->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR(pdata->pinctrl)) {
+		dev_err(dev, "could not get pinctrl handle\n");
+	} else {
+		pdata->pin_default = pinctrl_lookup_state(pdata->pinctrl, "default");
+		if (IS_ERR(pdata->pin_default))
+			dev_err(dev, "could not get default pinstate\n");
+		pdata->pin_slow = pinctrl_lookup_state(pdata->pinctrl, "slow");
+		if (IS_ERR(pdata->pin_slow))
+			dev_err(dev, "could not get slow pinstate\n");
+		else
+			pinctrl_select_state(pdata->pinctrl, pdata->pin_slow);
+		pdata->pin_fast = pinctrl_lookup_state(pdata->pinctrl, "fast");
+		if (IS_ERR(pdata->pin_fast))
+			dev_info(dev, "could not get fast pinstate\n");
+	}
+
+	ret = asr_init_host_with_pdata(host, pdata);
+	if (ret) {
+		dev_err(mmc_dev(host->mmc),
+			"failed to init host with pdata\n");
+		goto err_init_host;
+	}
+	pdata->qos_idle.name = pdev->name;
+	pm_qos_add_request(&pdata->qos_idle, PM_QOS_CPUIDLE_BLOCK,
+			PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+
+	/*
+	 * as RPM will set as active just below, so here enable dvfs too
+	 * And there is not dvfs requst by default, the driver needs to
+	 * call pxa_sdh_request_dvfs_level when need.
+	 */
+	asr_sdh_create_dvfs(host);
+	asr_sdh_request_dvfs_level(host, 0);
+	asr_sdh_enable_dvfs(host);
+
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, ASR_RPM_DELAY_MS);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_suspend_ignore_children(&pdev->dev, 1);
+	pm_runtime_enable(&pdev->dev);
+
+	/* dma only 32 bit now */
+	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+	asr_access_constrain(host, 1);
+	ret = sdhci_add_host(host);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add host\n");
+		goto err_add_host;
+	}
+
+	platform_set_drvdata(pdev, host);
+
+	if (host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ)
+		device_init_wakeup(&pdev->dev, 1);
+	else
+		device_init_wakeup(&pdev->dev, 0);
+
+	pm_runtime_put_autosuspend(&pdev->dev);
+	return 0;
+
+err_add_host:
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	/* The QoS request only exists once init_host has succeeded. */
+	pm_qos_remove_request(&pdata->qos_idle);
+err_init_host:
+	clk_disable_unprepare(asr->clk_io);
+	if (!IS_ERR(asr->clk_core))
+		clk_disable_unprepare(asr->clk_core);
+err_clk_get:
+	sdhci_pltfm_free(pdev);
+	return ret;
+}
+
+/*
+ * asr_sdhci_remove() - unbind the controller.
+ *
+ * Tears down in reverse probe order.  Note: the sdhci_asr private data
+ * lives inside the host allocation, so it must NOT be kfree()d here —
+ * sdhci_pltfm_free() releases it (the old kfree(asr) was a double free).
+ * Likewise the clocks come from devm_clk_get(), so no clk_put().
+ */
+static int asr_sdhci_remove(struct platform_device *pdev)
+{
+	struct sdhci_host *host = platform_get_drvdata(pdev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_asr *asr = sdhci_pltfm_priv(pltfm_host);
+	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
+
+	pm_runtime_get_sync(&pdev->dev);
+	sdhci_remove_host(host, 1);
+	pm_runtime_disable(&pdev->dev);
+
+	if (pdata)
+		pm_qos_remove_request(&pdata->qos_idle);
+
+	clk_disable_unprepare(pltfm_host->clk);
+	if (!IS_ERR(asr->clk_core))
+		clk_disable_unprepare(asr->clk_core);
+
+	sdhci_pltfm_free(pdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * System suspend: quiesce the SDHCI host, arm card-detect wake if
+ * requested, then force runtime suspend so clocks are gated.
+ *
+ * The pm_runtime_get_sync() here is normally balanced by the put in
+ * sdhci_asr_resume(); on failure paths resume will not run, so the
+ * reference must be dropped locally before returning the error.
+ */
+static int sdhci_asr_suspend(struct device *dev)
+{
+	int ret;
+	struct sdhci_host *host = dev_get_drvdata(dev);
+
+	pm_runtime_get_sync(dev);
+	ret = sdhci_suspend_host(host);
+	if (ret) {
+		/* was missing: leaked the runtime PM reference on error */
+		pm_runtime_put_autosuspend(dev);
+		return ret;
+	}
+
+	if (host->mmc->caps & MMC_CAP_CD_WAKE)
+		mmc_gpio_set_cd_wake(host->mmc, true);
+
+	ret = pm_runtime_force_suspend(dev);
+	if (ret) {
+		/* Unwind: wake the host back up and rebalance runtime PM. */
+		if (host->mmc->caps & MMC_CAP_CD_WAKE)
+			mmc_gpio_set_cd_wake(host->mmc, false);
+		sdhci_resume_host(host);
+		pm_runtime_put_autosuspend(dev);
+	}
+	return ret;
+}
+
+static int sdhci_asr_resume(struct device *dev)
+{
+ int ret;
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret) {
+ dev_err(dev, "failed to resume pm_runtime (%d)\n", ret);
+ return ret;
+ }
+
+ ret = sdhci_resume_host(host);
+
+ if (host->mmc->caps & MMC_CAP_CD_WAKE)
+ mmc_gpio_set_cd_wake(host->mmc, false);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+/*
+ * Runtime suspend: release the access constraint, gate the bus clocks
+ * (unless the controller requires the base clock to stay on), and drop
+ * the DVFS request.
+ */
+static int sdhci_asr_runtime_suspend(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_asr *asr = sdhci_pltfm_priv(pltfm_host);
+	unsigned long irqflags;
+
+	asr_access_constrain(host, 0);
+
+	if (!(host->quirks2 & SDHCI_QUIRK2_BASE_CLOCK_ALWAYS_ON)) {
+		spin_lock_irqsave(&host->lock, irqflags);
+		host->runtime_suspended = true;
+		spin_unlock_irqrestore(&host->lock, irqflags);
+
+		clk_disable_unprepare(pltfm_host->clk);
+		if (!IS_ERR(asr->clk_core))
+			clk_disable_unprepare(asr->clk_core);
+	}
+
+	/* DVFS is dropped even when the base clock stays running. */
+	asr_sdh_disable_dvfs(host);
+	return 0;
+}
+
+/*
+ * Runtime resume: mirror of sdhci_asr_runtime_suspend() — restore the
+ * DVFS request and access constraint, then ungate the clocks unless
+ * they were never gated.
+ */
+static int sdhci_asr_runtime_resume(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_asr *asr = sdhci_pltfm_priv(pltfm_host);
+	unsigned long irqflags;
+
+	asr_sdh_enable_dvfs(host);
+	asr_access_constrain(host, 1);
+
+	/* Nothing to re-enable when the base clock was kept running. */
+	if (host->quirks2 & SDHCI_QUIRK2_BASE_CLOCK_ALWAYS_ON)
+		return 0;
+
+	clk_prepare_enable(pltfm_host->clk);
+	if (!IS_ERR(asr->clk_core))
+		clk_prepare_enable(asr->clk_core);
+
+	spin_lock_irqsave(&host->lock, irqflags);
+	host->runtime_suspended = false;
+	spin_unlock_irqrestore(&host->lock, irqflags);
+
+	return 0;
+}
+
+/*
+ * PM callbacks: system sleep handlers compile in under CONFIG_PM_SLEEP,
+ * runtime handlers under CONFIG_PM; the SET_* macros expand to nothing
+ * when the corresponding option is disabled.
+ */
+static const struct dev_pm_ops sdhci_asr_pmops = {
+	SET_SYSTEM_SLEEP_PM_OPS(sdhci_asr_suspend, sdhci_asr_resume)
+	SET_RUNTIME_PM_OPS(sdhci_asr_runtime_suspend,
+		sdhci_asr_runtime_resume, NULL)
+};
+
+static struct platform_driver asr_sdhci_driver = {
+	.driver = {
+		.name = "sdhci-asr",
+#ifdef CONFIG_OF
+		.of_match_table = sdhci_asr_of_match,
+#endif
+		/* NOTE(review): .owner is already set by the
+		 * module_platform_driver() registration path; this
+		 * explicit assignment is redundant but harmless. */
+		.owner = THIS_MODULE,
+		.pm = &sdhci_asr_pmops,
+	},
+	.probe = asr_sdhci_probe,
+	.remove = asr_sdhci_remove,
+};
+
+/* Standard module boilerplate: registers the driver at init, unregisters
+ * at exit. */
+module_platform_driver(asr_sdhci_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for ASR");
+MODULE_AUTHOR("ASR Microelectronics Ltd.");
+MODULE_LICENSE("GPL v2");
+