ZTE's code, first commit

Change-Id: I9a04da59e459a9bc0d67f101f700d9d7dc8d681b
diff --git a/ap/os/linux/linux-3.4.x/drivers/mmc/card/Kconfig b/ap/os/linux/linux-3.4.x/drivers/mmc/card/Kconfig
new file mode 100644
index 0000000..ebb4afe
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/mmc/card/Kconfig
@@ -0,0 +1,78 @@
+#
+# MMC/SD card drivers
+#
+
+comment "MMC/SD/SDIO Card Drivers"
+
+config MMC_BLOCK
+	tristate "MMC block device driver"
+	depends on BLOCK
+	default y
+	help
+	  Say Y here to enable the MMC block device driver support.
+	  This provides a block device driver, which you can use to
+	  mount the filesystem. Almost everyone wishing MMC support
+	  should say Y or M here.
+
+config MMC_BLOCK_MINORS
+	int "Number of minors per block device"
+	depends on MMC_BLOCK
+	range 4 256
+	default 8
+	help
+	  Number of minors per block device. One is needed for every
+	  partition on the disk (plus one for the whole disk).
+
+	  Number of total MMC minors available is 256, so your number
+	  of supported block devices will be limited to 256 divided
+	  by this number.
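+
+	  For example, with the default of 8, at most 256 / 8 = 32 block
+	  devices are supported, each with up to 7 partitions.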
+
+	  Default is 8 to be backwards compatible with previous
+	  hardwired device numbering.
+
+	  If unsure, say 8 here.
+
+config MMC_BLOCK_BOUNCE
+	bool "Use bounce buffer for simple hosts"
+	depends on MMC_BLOCK
+	default y
+	help
+	  SD/MMC is a high latency protocol where it is crucial to
+	  send large requests in order to get high performance. Many
+	  controllers, however, are restricted to contiguous memory
+	  (i.e. they can't do scatter-gather), something the kernel
+	  rarely can provide.
+
+	  Say Y here to help these restricted hosts by bouncing
+	  requests back and forth from a large buffer. You will get
+	  a big performance gain at the cost of up to 64 KiB of
+	  physical memory.
+
+	  If unsure, say Y here.
+
+config MMC_BLOCK_DEFERRED_RESUME
+	bool "Deferr MMC layer resume until I/O is requested"
+	depends on MMC_BLOCK
+	default n
+	help
+	  Say Y here to enable deferred MMC resume until I/O
+	  is requested. This will reduce overall resume latency and
+	  save power when there's an SD card inserted but not being used.
+
+config SDIO_UART
+	tristate "SDIO UART/GPS class support"
+	help
+	  SDIO function driver for SDIO cards that implements the UART
+	  class, as well as the GPS class which appears like a UART.
+
+config MMC_TEST
+	tristate "MMC host test driver"
+	help
+	  Development driver that performs a series of reads and writes
+	  to a memory card in order to expose certain well known bugs
+	  in host controllers. The tests are executed by writing to the
+	  "test" file in debugfs under each card. Note that whatever is
+	  on your card will be overwritten by these tests.
+
+	  This driver is only of interest to those developing or
+	  testing a host driver. Most people should say N here.
diff --git a/ap/os/linux/linux-3.4.x/drivers/mmc/card/Makefile b/ap/os/linux/linux-3.4.x/drivers/mmc/card/Makefile
new file mode 100644
index 0000000..c73b406
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/mmc/card/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for MMC/SD card drivers
+#
+
+obj-$(CONFIG_MMC_BLOCK)		+= mmc_block.o
+mmc_block-objs			:= block.o queue.o
+obj-$(CONFIG_MMC_TEST)		+= mmc_test.o
+
+obj-$(CONFIG_SDIO_UART)		+= sdio_uart.o
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/mmc/card/block.c b/ap/os/linux/linux-3.4.x/drivers/mmc/card/block.c
new file mode 100644
index 0000000..84c5ac7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/mmc/card/block.c
@@ -0,0 +1,2097 @@
+/*
+ * Block driver for media (i.e., flash cards)
+ *
+ * Copyright 2002 Hewlett-Packard Company
+ * Copyright 2005-2008 Pierre Ossman
+ *
+ * Use consistent with the GNU GPL is permitted,
+ * provided that this copyright notice is
+ * preserved in its entirety in all copies and derived works.
+ *
+ * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
+ * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
+ * FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ * Many thanks to Alessandro Rubini and Jonathan Corbet!
+ *
+ * Author:  Andrew Christian
+ *          28 May 2002
+ */
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/string_helpers.h>
+#include <linux/delay.h>
+#include <linux/capability.h>
+#include <linux/compat.h>
+
+#include <linux/mmc/ioctl.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#include <asm/uaccess.h>
+
+#include "queue.h"
+#include "../core/core.h"
+#include "../core/mmc_ops.h"
+
+
+MODULE_ALIAS("mmc:block");
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "mmcblk."
+
+//#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
+#define MMC_BLK_TIMEOUT_MS  (1 * 1 * 1000)        /* 1 second timeout; see EC 616000480970 */
+
+#define INAND_CMD38_ARG_EXT_CSD  113
+#define INAND_CMD38_ARG_ERASE    0x00
+#define INAND_CMD38_ARG_TRIM     0x01
+#define INAND_CMD38_ARG_SECERASE 0x80
+#define INAND_CMD38_ARG_SECTRIM1 0x81
+#define INAND_CMD38_ARG_SECTRIM2 0x88
+
+static DEFINE_MUTEX(block_mutex);
+
+/*
+ * The defaults come from config options but can be overridden by module
+ * or bootarg options.
+ */
+static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
+
+/*
+ * We've only got one major, so number of mmcblk devices is
+ * limited to 256 / number of minors per device.
+ */
+static int max_devices;
+
+/* 256 minors, so at most 256 separate devices */
+static DECLARE_BITMAP(dev_use, 256);
+static DECLARE_BITMAP(name_use, 256);
+
+/*
+ * There is one mmc_blk_data per slot.
+ */
+struct mmc_blk_data {
+	spinlock_t	lock;
+	struct gendisk	*disk;
+	struct mmc_queue queue;
+	struct list_head part;
+
+	unsigned int	flags;
+#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
+#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
+
+	unsigned int	usage;
+	unsigned int	read_only;
+	unsigned int	part_type;
+	unsigned int	name_idx;
+	unsigned int	reset_done;
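+/* reset_done flags: request types for which a bus reset has been attempted */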
+#define MMC_BLK_READ		BIT(0)
+#define MMC_BLK_WRITE		BIT(1)
+#define MMC_BLK_DISCARD		BIT(2)
+#define MMC_BLK_SECDISCARD	BIT(3)
+
+	/*
+	 * Only set in main mmc_blk_data associated
+	 * with mmc_card with mmc_set_drvdata, and keeps
+	 * track of the current selected device partition.
+	 */
+	unsigned int	part_curr;
+	struct device_attribute force_ro;
+	struct device_attribute power_ro_lock;
+	int	area_type;
+};
+
+static DEFINE_MUTEX(open_lock);
+
+enum mmc_blk_status {
+	MMC_BLK_SUCCESS = 0,
+	MMC_BLK_PARTIAL,
+	MMC_BLK_CMD_ERR,
+	MMC_BLK_RETRY,
+	MMC_BLK_ABORT,
+	MMC_BLK_DATA_ERR,
+	MMC_BLK_ECC_ERR,
+	MMC_BLK_NOMEDIUM,
+};
+
+module_param(perdev_minors, int, 0444);
+MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
+
+static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
+{
+	struct mmc_blk_data *md;
+
+	mutex_lock(&open_lock);
+	md = disk->private_data;
+	if (md && md->usage == 0)
+		md = NULL;
+	if (md)
+		md->usage++;
+	mutex_unlock(&open_lock);
+
+	return md;
+}
+
+static inline int mmc_get_devidx(struct gendisk *disk)
+{
+	int devidx = disk->first_minor / perdev_minors;
+	return devidx;
+}
+
+static void mmc_blk_put(struct mmc_blk_data *md)
+{
+	mutex_lock(&open_lock);
+	md->usage--;
+	if (md->usage == 0) {
+		int devidx = mmc_get_devidx(md->disk);
+		blk_cleanup_queue(md->queue.queue);
+
+		__clear_bit(devidx, dev_use);
+
+		put_disk(md->disk);
+		kfree(md);
+	}
+	mutex_unlock(&open_lock);
+}
+
+static ssize_t power_ro_lock_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct mmc_blk_data *md;
+	struct mmc_card *card;
+	int locked = 0;
+
+	md = mmc_blk_get(dev_to_disk(dev));
+	if (!md)
+		return 0;
+	card = md->queue.card;
+
+	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
+		locked = 2;
+	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
+		locked = 1;
+
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+
+	return ret;
+}
+
+static ssize_t power_ro_lock_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret;
+	struct mmc_blk_data *md, *part_md;
+	struct mmc_card *card;
+	unsigned long set;
+
+	if (kstrtoul(buf, 0, &set))
+		return -EINVAL;
+
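+	/* Only a value of 1 arms the lock; any other value is ignored. */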
+	if (set != 1)
+		return count;
+
+	md = mmc_blk_get(dev_to_disk(dev));
+	if (!md)
+		return -ENODEV;
+	card = md->queue.card;
+
+	mmc_claim_host(card->host);
+
+	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
+				card->ext_csd.boot_ro_lock |
+				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
+				card->ext_csd.part_time);
+	if (ret)
+		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
+	else
+		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
+
+	mmc_release_host(card->host);
+
+	if (!ret) {
+		pr_info("%s: Locking boot partition ro until next power on\n",
+			md->disk->disk_name);
+		set_disk_ro(md->disk, 1);
+
+		list_for_each_entry(part_md, &md->part, part)
+			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
+				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
+				set_disk_ro(part_md->disk, 1);
+			}
+	}
+
+	mmc_blk_put(md);
+	return count;
+}
+
+static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	int ret;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+	if (!md)
+		return 0;
+
+	ret = snprintf(buf, PAGE_SIZE, "%d",
+		       get_disk_ro(dev_to_disk(dev)) ^
+		       md->read_only);
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	int ret;
+	char *end;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	unsigned long set = simple_strtoul(buf, &end, 0);
+	if (!md)
+		return -ENODEV;
+	if (end == buf) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	set_disk_ro(dev_to_disk(dev), set || md->read_only);
+	ret = count;
+out:
+	mmc_blk_put(md);
+	return ret;
+}
+
+static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
+{
+	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
+	int ret = -ENXIO;
+
+	mutex_lock(&block_mutex);
+	if (md) {
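+		/*
+		 * usage == 2 means this is the first opener (one reference
+		 * is held from allocation, plus the one taken above), so
+		 * re-check the media for a change.
+		 */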
+		if (md->usage == 2)
+			check_disk_change(bdev);
+		ret = 0;
+
+		if ((mode & FMODE_WRITE) && md->read_only) {
+			mmc_blk_put(md);
+			ret = -EROFS;
+		}
+	}
+	mutex_unlock(&block_mutex);
+
+	return ret;
+}
+
+static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
+{
+	struct mmc_blk_data *md = disk->private_data;
+
+	mutex_lock(&block_mutex);
+	mmc_blk_put(md);
+	mutex_unlock(&block_mutex);
+	return 0;
+}
+
+static int
+mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
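+	/* Report a synthetic CHS geometry (4 heads, 16 sectors per track). */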
+	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+	geo->heads = 4;
+	geo->sectors = 16;
+	return 0;
+}
+
+struct mmc_blk_ioc_data {
+	struct mmc_ioc_cmd ic;
+	unsigned char *buf;
+	u64 buf_bytes;
+};
+
+static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+	struct mmc_ioc_cmd __user *user)
+{
+	struct mmc_blk_ioc_data *idata;
+	int err;
+
+	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+	if (!idata) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
+		err = -EFAULT;
+		goto idata_err;
+	}
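+	/*
+	 * Compute the transfer size only when both factors are below the
+	 * ioctl cap; otherwise buf_bytes stays 0 and the request is handled
+	 * as a no-data command.
+	 */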
+	if ((idata->ic.blocks < MMC_IOC_MAX_BYTES) &&
+	    (idata->ic.blksz < MMC_IOC_MAX_BYTES))
+		idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
+
+	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
+		err = -EOVERFLOW;
+		goto idata_err;
+	}
+
+	if (!idata->buf_bytes)
+		return idata;
+
+	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
+	if (!idata->buf) {
+		err = -ENOMEM;
+		goto idata_err;
+	}
+
+	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
+					idata->ic.data_ptr, idata->buf_bytes)) {
+		err = -EFAULT;
+		goto copy_err;
+	}
+
+	return idata;
+
+copy_err:
+	kfree(idata->buf);
+idata_err:
+	kfree(idata);
+out:
+	return ERR_PTR(err);
+}
+
+static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+	struct mmc_ioc_cmd __user *ic_ptr)
+{
+	struct mmc_blk_ioc_data *idata;
+	struct mmc_blk_data *md;
+	struct mmc_card *card;
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
+	struct mmc_request mrq = {NULL};
+	struct scatterlist sg;
+	int err = 0;
+
+	/*
+	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+	 * whole block device, not on a partition.  This prevents overspray
+	 * between sibling partitions.
+	 */
+	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+		return -EPERM;
+
+	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+	if (IS_ERR(idata))
+		return PTR_ERR(idata);
+
+	md = mmc_blk_get(bdev->bd_disk);
+	if (!md) {
+		err = -EINVAL;
+		goto cmd_done;
+	}
+
+	card = md->queue.card;
+	if (IS_ERR(card)) {
+		err = PTR_ERR(card);
+		goto cmd_done;
+	}
+
+	cmd.opcode = idata->ic.opcode;
+	cmd.arg = idata->ic.arg;
+	cmd.flags = idata->ic.flags;
+
+	if (idata->buf_bytes) {
+		data.sg = &sg;
+		data.sg_len = 1;
+		data.blksz = idata->ic.blksz;
+		data.blocks = idata->ic.blocks;
+
+		sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+		if (idata->ic.write_flag)
+			data.flags = MMC_DATA_WRITE;
+		else
+			data.flags = MMC_DATA_READ;
+
+		/* data.flags must already be set before doing this. */
+		mmc_set_data_timeout(&data, card);
+
+		/* Allow overriding the timeout_ns for empirical tuning. */
+		if (idata->ic.data_timeout_ns)
+			data.timeout_ns = idata->ic.data_timeout_ns;
+
+		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+			/*
+			 * Pretend this is a data transfer and rely on the
+			 * host driver to compute timeout.  When all host
+			 * drivers support cmd.cmd_timeout for R1B, this
+			 * can be changed to:
+			 *
+			 *     mrq.data = NULL;
+			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+			 */
+			/* Guard against u32 overflow in the ms-to-ns conversion. */
+			if (idata->ic.cmd_timeout_ms < (UINT_MAX / 1000000))
+				data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+		}
+
+		mrq.data = &data;
+	}
+
+	mrq.cmd = &cmd;
+
+	mmc_claim_host(card->host);
+
+	if (idata->ic.is_acmd) {
+		err = mmc_app_cmd(card->host, card);
+		if (err)
+			goto cmd_rel_host;
+	}
+
+	mmc_wait_for_req(card->host, &mrq);
+
+	if (cmd.error) {
+		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+						__func__, cmd.error);
+		err = cmd.error;
+		goto cmd_rel_host;
+	}
+	if (data.error) {
+		dev_err(mmc_dev(card->host), "%s: data error %d\n",
+						__func__, data.error);
+		err = data.error;
+		goto cmd_rel_host;
+	}
+
+	/*
+	 * According to the SD specs, some commands require a delay after
+	 * issuing the command.
+	 */
+	if (idata->ic.postsleep_min_us &&
+	    (idata->ic.postsleep_min_us < idata->ic.postsleep_max_us))
+		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
+
+	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
+		err = -EFAULT;
+		goto cmd_rel_host;
+	}
+
+	if (!idata->ic.write_flag) {
+		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
+						idata->buf, idata->buf_bytes)) {
+			err = -EFAULT;
+			goto cmd_rel_host;
+		}
+	}
+
+cmd_rel_host:
+	mmc_release_host(card->host);
+
+cmd_done:
+	if (md)
+		mmc_blk_put(md);
+	kfree(idata->buf);
+	kfree(idata);
+	return err;
+}
+
+static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+	unsigned int cmd, unsigned long arg)
+{
+	int ret = -EINVAL;
+	if (cmd == MMC_IOC_CMD)
+		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+	unsigned int cmd, unsigned long arg)
+{
+	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static const struct block_device_operations mmc_bdops = {
+	.open			= mmc_blk_open,
+	.release		= mmc_blk_release,
+	.getgeo			= mmc_blk_getgeo,
+	.owner			= THIS_MODULE,
+	.ioctl			= mmc_blk_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= mmc_blk_compat_ioctl,
+#endif
+};
+
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+				      struct mmc_blk_data *md)
+{
+	int ret;
+	struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+
+	if (!main_md)
+		return -EINVAL;
+
+	if (main_md->part_curr == md->part_type)
+		return 0;
+
+	if (mmc_card_mmc(card)) {
+		u8 part_config = card->ext_csd.part_config;
+
+		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+		part_config |= md->part_type;
+
+		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_PART_CONFIG, part_config,
+				 card->ext_csd.part_time);
+		if (ret)
+			return ret;
+
+		card->ext_csd.part_config = part_config;
+	}
+
+	main_md->part_curr = md->part_type;
+	return 0;
+}
+
+static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
+{
+	int err;
+	u32 result;
+	__be32 *blocks;
+
+	struct mmc_request mrq = {NULL};
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
+	unsigned int timeout_us;
+
+	struct scatterlist sg;
+
+	cmd.opcode = MMC_APP_CMD;
+	cmd.arg = card->rca << 16;
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+	err = mmc_wait_for_cmd(card->host, &cmd, 0);
+	if (err)
+		return (u32)-1;
+	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
+		return (u32)-1;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
+	cmd.arg = 0;
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
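+	/* Scale the CSD access time by 100 and clamp the total at 100 ms. */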
+	data.timeout_ns = card->csd.tacc_ns * 100;
+	data.timeout_clks = card->csd.tacc_clks * 100;
+
+	timeout_us = data.timeout_ns / 1000;
+	timeout_us += data.timeout_clks * 1000 /
+		(card->host->ios.clock / 1000);
+
+	if (timeout_us > 100000) {
+		data.timeout_ns = 100000000;
+		data.timeout_clks = 0;
+	}
+
+	data.blksz = 4;
+	data.blocks = 1;
+	data.flags = MMC_DATA_READ;
+	data.sg = &sg;
+	data.sg_len = 1;
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+
+	/* Zero the allocation so ntohl() never reads uninitialized memory. */
+	blocks = kzalloc(4, GFP_KERNEL);
+	if (!blocks)
+		return (u32)-1;
+
+	sg_init_one(&sg, blocks, 4);
+
+	mmc_wait_for_req(card->host, &mrq);
+
+	result = ntohl(*blocks);
+	kfree(blocks);
+
+	if (cmd.error || data.error)
+		result = (u32)-1;
+
+	return result;
+}
+
+static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
+		bool hw_busy_detect, struct request *req, bool *gen_err)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+	int err = 0;
+	u32 status;
+	int check_cnt = 0;
+
+	do {
+		err = __mmc_send_status(card, &status, 5);
+		if (err) {
+			pr_err("%s: error %d requesting status\n",
+			       req->rq_disk->disk_name, err);
+			return err;
+		}
+
+		if (status & R1_ERROR) {
+			pr_err("%s: %s: error sending status cmd, status %#x\n",
+				req->rq_disk->disk_name, __func__, status);
+			*gen_err = true;
+		}
+
+		/* We may rely on the host hw to handle busy detection. */
+		if (hw_busy_detect &&
+			(card->host->caps & MMC_CAP_WAIT_WHILE_BUSY))
+			break;
+
+		/*
+		 * Timeout if the device never becomes ready for data and never
+		 * leaves the program state.
+		 */
+		if (time_after(jiffies, timeout)) {
+			pr_err("%s: Card stuck in programming state! %s %s\n",
+				mmc_hostname(card->host),
+				req->rq_disk->disk_name, __func__);
+			return -ETIMEDOUT;
+		}
+
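+		/*
+		 * Poll hard at first, then back off: no delay for the first
+		 * five polls, 100us up to 20, 1ms up to 30, then 10ms.
+		 */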
+		check_cnt++;
+		if (check_cnt > 30)
+			mmc_delay(10);
+		else if (check_cnt > 20)
+			mmc_delay(1);
+		else if (check_cnt > 5)
+			udelay(100);
+
+		/*
+		 * Some cards mishandle the status bits,
+		 * so make sure to check both the busy
+		 * indication and the card state.
+		 */
+	} while (!(status & R1_READY_FOR_DATA) ||
+		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+
+	return err;
+}
+
+static int send_stop(struct mmc_card *card, u32 *status)
+{
+	struct mmc_command cmd = {0};
+	int err;
+
+	cmd.opcode = MMC_STOP_TRANSMISSION;
+	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	err = mmc_wait_for_cmd(card->host, &cmd, 5);
+	if (err == 0)
+		*status = cmd.resp[0];
+	return err;
+}
+
+static int get_card_status(struct mmc_card *card, u32 *status, int retries)
+{
+	struct mmc_command cmd = {0};
+	int err;
+
+	cmd.opcode = MMC_SEND_STATUS;
+	if (!mmc_host_is_spi(card->host))
+		cmd.arg = card->rca << 16;
+	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+	err = mmc_wait_for_cmd(card->host, &cmd, retries);
+	if (err == 0)
+		*status = cmd.resp[0];
+	return err;
+}
+
+#define ERR_NOMEDIUM	3
+#define ERR_RETRY	2
+#define ERR_ABORT	1
+#define ERR_CONTINUE	0
+
+static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
+	bool status_valid, u32 status)
+{
+	switch (error) {
+	case -EILSEQ:
+		/* response crc error, retry the r/w cmd */
+		pr_err("%s: %s sending %s command, card status %#x\n",
+			req->rq_disk->disk_name, "response CRC error",
+			name, status);
+		return ERR_RETRY;
+
+	case -ETIMEDOUT:
+		pr_err("%s: %s sending %s command, card status %#x\n",
+			req->rq_disk->disk_name, "timed out", name, status);
+
+		/* If the status cmd initially failed, retry the r/w cmd */
+		if (!status_valid) {
+			pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
+			return ERR_RETRY;
+		}
+		/*
+		 * If it was a r/w cmd crc error, or illegal command
+		 * (eg, issued in wrong state) then retry - we should
+		 * have corrected the state problem above.
+		 */
+		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+			pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
+			return ERR_RETRY;
+		}
+
+		/* Otherwise abort the command */
+		pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
+		return ERR_ABORT;
+
+	default:
+		/* We don't understand the error code the driver gave us */
+		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+		       req->rq_disk->disk_name, error, status);
+		return ERR_ABORT;
+	}
+}
+
+/*
+ * Initial r/w and stop cmd error recovery.
+ * We don't know whether the card received the r/w cmd or not, so try to
+ * restore things back to a sane state.  Essentially, we do this as follows:
+ * - Obtain card status.  If the first attempt to obtain card status fails,
+ *   the status word will reflect the failed status cmd, not the failed
+ *   r/w cmd.  If we fail to obtain card status, it suggests we can no
+ *   longer communicate with the card.
+ * - Check the card state.  If the card received the cmd but there was a
+ *   transient problem with the response, it might still be in a data transfer
+ *   mode.  Try to send it a stop command.  If this fails, we can't recover.
+ * - If the r/w cmd failed due to a response CRC error, it was probably
+ *   transient, so retry the cmd.
+ * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
+ * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
+ *   illegal cmd, retry.
+ * Otherwise we don't understand what happened, so abort.
+ */
+static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
+{
+	bool prev_cmd_status_valid = true;
+	u32 status, stop_status = 0;
+	int err, retry;
+
+	if (mmc_card_removed(card))
+		return ERR_NOMEDIUM;
+
+	/*
+	 * Try to get card status which indicates both the card state
+	 * and why there was no response.  If the first attempt fails,
+	 * we can't be sure the returned status is for the r/w command.
+	 */
+	for (retry = 2; retry >= 0; retry--) {
+		err = get_card_status(card, &status, 0);
+		if (!err)
+			break;
+
+		prev_cmd_status_valid = false;
+		pr_err("%s: error %d sending status command, %sing\n",
+		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
+	}
+
+	/* We couldn't get a response from the card.  Give up. */
+	if (err) {
+		/* Check if the card is removed */
+		if (mmc_detect_card_removed(card->host))
+			return ERR_NOMEDIUM;
+		return ERR_ABORT;
+	}
+
+	/* Flag ECC errors */
+	if ((status & R1_CARD_ECC_FAILED) ||
+	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
+	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+		*ecc_err = 1;
+
+	/* Flag General errors */
+	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+		if ((status & R1_ERROR) ||
+			(brq->stop.resp[0] & R1_ERROR)) {
+			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
+			       req->rq_disk->disk_name, __func__,
+			       brq->stop.resp[0], status);
+			*gen_err = 1;
+		}
+
+	/*
+	 * Check the current card state.  If it is in some data transfer
+	 * mode, tell it to stop (and hopefully transition back to TRAN.)
+	 */
+	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
+	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
+		err = send_stop(card, &stop_status);
+		if (err)
+			pr_err("%s: error %d sending stop command\n",
+			       req->rq_disk->disk_name, err);
+
+		/*
+		 * If the stop cmd also timed out, the card is probably
+		 * not present, so abort.  Other errors are bad news too.
+		 */
+		if (err)
+			return ERR_ABORT;
+		if (stop_status & R1_CARD_ECC_FAILED)
+			*ecc_err = 1;
+		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+			if (stop_status & R1_ERROR) {
+				pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+				       req->rq_disk->disk_name, __func__,
+				       stop_status);
+				*gen_err = 1;
+			}
+	}
+
+	/* Check for set block count errors */
+	if (brq->sbc.error)
+		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
+				prev_cmd_status_valid, status);
+
+	/* Check for r/w command errors */
+	if (brq->cmd.error)
+		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
+				prev_cmd_status_valid, status);
+
+	/* Data errors */
+	if (!brq->stop.error)
+		return ERR_CONTINUE;
+
+	/* Now for stop errors.  These aren't fatal to the transfer. */
+	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
+	       req->rq_disk->disk_name, brq->stop.error,
+	       brq->cmd.resp[0], status);
+
+	/*
+	 * Substitute in our own stop status as this will give the error
+	 * state which happened during the execution of the r/w command.
+	 */
+	if (stop_status) {
+		brq->stop.resp[0] = stop_status;
+		brq->stop.error = 0;
+	}
+	return ERR_CONTINUE;
+}
+
+static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+			 int type)
+{
+	int err;
+
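+	/* Attempt only one reset per request type until one succeeds. */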
+	if (md->reset_done & type)
+		return -EEXIST;
+
+	md->reset_done |= type;
+	err = mmc_hw_reset(host);
+	/* Ensure we switch back to the correct partition */
+	if (err != -EOPNOTSUPP) {
+		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
+		int part_err;
+
+		if (!main_md)
+			return -EINVAL;
+
+		main_md->part_curr = main_md->part_type;
+		part_err = mmc_blk_part_switch(host->card, md);
+		if (part_err) {
+			/*
+			 * We have failed to get back into the correct
+			 * partition, so we need to abort the whole request.
+			 */
+			return -ENODEV;
+		}
+	}
+	return err;
+}
+
+static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+{
+	md->reset_done &= ~type;
+}
+
+static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	unsigned int from, nr, arg;
+	int err = 0, type = MMC_BLK_DISCARD;
+
+	if (!mmc_can_erase(card)) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	from = blk_rq_pos(req);
+	nr = blk_rq_sectors(req);
+
+	if (mmc_can_discard(card))
+		arg = MMC_DISCARD_ARG;
+	else if (mmc_can_trim(card))
+		arg = MMC_TRIM_ARG;
+	else
+		arg = MMC_ERASE_ARG;
+retry:
+	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 INAND_CMD38_ARG_EXT_CSD,
+				 arg == MMC_TRIM_ARG ?
+				 INAND_CMD38_ARG_TRIM :
+				 INAND_CMD38_ARG_ERASE,
+				 0);
+		if (err)
+			goto out;
+	}
+	err = mmc_erase(card, from, nr, arg);
+out:
+	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+		goto retry;
+	if (!err)
+		mmc_blk_reset_success(md, type);
+	spin_lock_irq(&md->lock);
+	__blk_end_request(req, err, blk_rq_bytes(req));
+	spin_unlock_irq(&md->lock);
+
+	return err ? 0 : 1;
+}
+
+static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+				       struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	unsigned int from, nr, arg, trim_arg, erase_arg;
+	int err = 0, type = MMC_BLK_SECDISCARD;
+
+	if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	from = blk_rq_pos(req);
+	nr = blk_rq_sectors(req);
+
+	/* The sanitize operation is supported at v4.5 only */
+	if (mmc_can_sanitize(card)) {
+		erase_arg = MMC_ERASE_ARG;
+		trim_arg = MMC_TRIM_ARG;
+	} else {
+		erase_arg = MMC_SECURE_ERASE_ARG;
+		trim_arg = MMC_SECURE_TRIM1_ARG;
+	}
+
+	if (mmc_erase_group_aligned(card, from, nr))
+		arg = erase_arg;
+	else if (mmc_can_trim(card))
+		arg = trim_arg;
+	else {
+		err = -EINVAL;
+		goto out;
+	}
+retry:
+	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 INAND_CMD38_ARG_EXT_CSD,
+				 arg == MMC_SECURE_TRIM1_ARG ?
+				 INAND_CMD38_ARG_SECTRIM1 :
+				 INAND_CMD38_ARG_SECERASE,
+				 0);
+		if (err)
+			goto out_retry;
+	}
+
+	err = mmc_erase(card, from, nr, arg);
+	if (err == -EIO)
+		goto out_retry;
+	if (err)
+		goto out;
+
+	if (arg == MMC_SECURE_TRIM1_ARG) {
+		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					 INAND_CMD38_ARG_EXT_CSD,
+					 INAND_CMD38_ARG_SECTRIM2,
+					 0);
+			if (err)
+				goto out_retry;
+		}
+
+		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
+		if (err == -EIO)
+			goto out_retry;
+		if (err)
+			goto out;
+	}
+
+	if (mmc_can_sanitize(card))
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_SANITIZE_START, 1, 0);
+out_retry:
+	if (err && !mmc_blk_reset(md, card->host, type))
+		goto retry;
+	if (!err)
+		mmc_blk_reset_success(md, type);
+out:
+	spin_lock_irq(&md->lock);
+	__blk_end_request(req, err, blk_rq_bytes(req));
+	spin_unlock_irq(&md->lock);
+
+	return err ? 0 : 1;
+}
+
+static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	int ret = 0;
+
+	ret = mmc_flush_cache(card);
+	if (ret)
+		ret = -EIO;
+
+	spin_lock_irq(&md->lock);
+	__blk_end_request_all(req, ret);
+	spin_unlock_irq(&md->lock);
+
+	return ret ? 0 : 1;
+}
+
+/*
+ * Reformat current write as a reliable write, supporting
+ * both legacy and the enhanced reliable write MMC cards.
+ * In each transfer we'll handle only as much as a single
+ * reliable write can handle, thus finish the request in
+ * partial completions.
+ */
+static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
+				    struct mmc_card *card,
+				    struct request *req)
+{
+	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+		/* Legacy mode imposes restrictions on transfers. */
+		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
+			brq->data.blocks = 1;
+
+		if (brq->data.blocks > card->ext_csd.rel_sectors)
+			brq->data.blocks = card->ext_csd.rel_sectors;
+		else if (brq->data.blocks < card->ext_csd.rel_sectors)
+			brq->data.blocks = 1;
+	}
+}
+
+#define CMD_ERRORS							\
+	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
+	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
+	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
+	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
+	 R1_CC_ERROR |		/* Card controller error */		\
+	 R1_ERROR)		/* General/unknown error */
+
+static int mmc_blk_err_check(struct mmc_card *card,
+			     struct mmc_async_req *areq)
+{
+	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+						    mmc_active);
+	struct mmc_blk_request *brq = &mq_mrq->brq;
+	struct request *req = mq_mrq->req;
+	int ecc_err = 0, gen_err = 0;
+	/*
+	 * sbc.error indicates a problem with the set block count
+	 * command.  No data will have been transferred.
+	 *
+	 * cmd.error indicates a problem with the r/w command.  No
+	 * data will have been transferred.
+	 *
+	 * stop.error indicates a problem with the stop command.  Data
+	 * may have been transferred, or may still be transferring.
+	 */
+	if (brq->sbc.error || brq->cmd.error ||
+	    brq->stop.error || brq->data.error) {
+		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
+		case ERR_RETRY:
+			return MMC_BLK_RETRY;
+		case ERR_ABORT:
+			return MMC_BLK_ABORT;
+		case ERR_NOMEDIUM:
+			return MMC_BLK_NOMEDIUM;
+		case ERR_CONTINUE:
+			break;
+		}
+	}
+
+	/*
+	 * Check for errors relating to the execution of the
+	 * initial command - such as address errors.  No data
+	 * has been transferred.
+	 */
+	if (brq->cmd.resp[0] & CMD_ERRORS) {
+		pr_err("%s: r/w command failed, status = %#x\n",
+		       req->rq_disk->disk_name, brq->cmd.resp[0]);
+		return MMC_BLK_ABORT;
+	}
+
+	/*
+	 * Everything else is either success, or a data error of some
+	 * kind.  If it was a write, we may have transitioned to
+	 * program mode, and we have to wait for it to complete.
+	 */
+	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+		/* u32 status; */
+		int err;
+
+		/* Check stop command response */
+		if (brq->stop.resp[0] & R1_ERROR) {
+			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+			       req->rq_disk->disk_name, __func__,
+			       brq->stop.resp[0]);
+			gen_err = 1;
+		}
+
+		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
+					&gen_err);
+		if (err)
+			return MMC_BLK_CMD_ERR;
+	}
+
+	/* if general error occurs, retry the write operation. */
+	if (gen_err) {
+		pr_warning("%s: retrying write for general error\n",
+				req->rq_disk->disk_name);
+		return MMC_BLK_RETRY;
+	}
+
+	if (brq->data.error) {
+		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+		       req->rq_disk->disk_name, brq->data.error,
+		       (unsigned)blk_rq_pos(req),
+		       (unsigned)blk_rq_sectors(req),
+		       brq->cmd.resp[0], brq->stop.resp[0]);
+
+		if (rq_data_dir(req) == READ) {
+			if (ecc_err)
+				return MMC_BLK_ECC_ERR;
+			return MMC_BLK_DATA_ERR;
+		} else {
+			return MMC_BLK_CMD_ERR;
+		}
+	}
+
+	if (!brq->data.bytes_xfered)
+		return MMC_BLK_RETRY;
+
+	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
+		return MMC_BLK_PARTIAL;
+
+	return MMC_BLK_SUCCESS;
+}
+
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+			       struct mmc_card *card,
+			       int disable_multi,
+			       struct mmc_queue *mq)
+{
+	u32 readcmd, writecmd;
+	struct mmc_blk_request *brq = &mqrq->brq;
+	struct request *req = mqrq->req;
+	struct mmc_blk_data *md = mq->data;
+	bool do_data_tag;
+
+	/*
+	 * Reliable writes are used to implement Forced Unit Access and
+	 * REQ_META accesses, and are supported only on MMCs.
+	 *
+	 * XXX: this really needs a good explanation of why REQ_META
+	 * is treated special.
+	 */
+	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+			  (req->cmd_flags & REQ_META)) &&
+		(rq_data_dir(req) == WRITE) &&
+		(md->flags & MMC_BLK_REL_WR);
+
+	memset(brq, 0, sizeof(struct mmc_blk_request));
+	brq->mrq.cmd = &brq->cmd;
+	brq->mrq.data = &brq->data;
+
+	brq->cmd.arg = blk_rq_pos(req);
+	if (!mmc_card_blockaddr(card))
+		brq->cmd.arg <<= 9;
+	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+	brq->data.blksz = 512;
+	brq->stop.opcode = MMC_STOP_TRANSMISSION;
+	brq->stop.arg = 0;
+	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	brq->data.blocks = blk_rq_sectors(req);
+
+	/*
+	 * The block layer doesn't support all sector count
+	 * restrictions, so we need to be prepared for too big
+	 * requests.
+	 */
+	if (brq->data.blocks > card->host->max_blk_count)
+		brq->data.blocks = card->host->max_blk_count;
+
+	if (brq->data.blocks > 1) {
+		/*
+		 * After a read error, we redo the request one sector
+		 * at a time in order to accurately determine which
+		 * sectors can be read successfully.
+		 */
+		if (disable_multi)
+			brq->data.blocks = 1;
+
+		/* Some controllers can't do multiblock reads due to hw bugs */
+		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
+		    rq_data_dir(req) == READ)
+			brq->data.blocks = 1;
+	}
+
+	if (brq->data.blocks > 1 || do_rel_wr) {
+		/* SPI multiblock writes terminate using a special
+		 * token, not a STOP_TRANSMISSION request.
+		 */
+		if (!mmc_host_is_spi(card->host) ||
+		    rq_data_dir(req) == READ)
+			brq->mrq.stop = &brq->stop;
+		readcmd = MMC_READ_MULTIPLE_BLOCK;
+		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+	} else {
+		brq->mrq.stop = NULL;
+		readcmd = MMC_READ_SINGLE_BLOCK;
+		writecmd = MMC_WRITE_BLOCK;
+	}
+	if (rq_data_dir(req) == READ) {
+		brq->cmd.opcode = readcmd;
+		brq->data.flags |= MMC_DATA_READ;
+	} else {
+		brq->cmd.opcode = writecmd;
+		brq->data.flags |= MMC_DATA_WRITE;
+	}
+
+	if (do_rel_wr)
+		mmc_apply_rel_rw(brq, card, req);
+
+	/*
+	 * Data tag is used only during writing meta data to speed
+	 * up write and any subsequent read of this meta data
+	 */
+	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+		(req->cmd_flags & REQ_META) &&
+		(rq_data_dir(req) == WRITE) &&
+		((brq->data.blocks * brq->data.blksz) >=
+		 card->ext_csd.data_tag_unit_size);
+
+	/*
+	 * Pre-defined multi-block transfers are preferable to
+	 * open ended-ones (and necessary for reliable writes).
+	 * However, it is not sufficient to just send CMD23,
+	 * and avoid the final CMD12, as on an error condition
+	 * CMD12 (stop) needs to be sent anyway. This, coupled
+	 * with Auto-CMD23 enhancements provided by some
+	 * hosts, means that the complexity of dealing
+	 * with this is best left to the host. If CMD23 is
+	 * supported by card and host, we'll fill sbc in and let
+	 * the host deal with handling it correctly. This means
+	 * that for hosts that don't expose MMC_CAP_CMD23, no
+	 * change of behavior will be observed.
+	 *
+	 * N.B: Some MMC cards experience perf degradation.
+	 * We'll avoid using CMD23-bounded multiblock writes for
+	 * these, while retaining features like reliable writes.
+	 */
+	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
+	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
+	     do_data_tag)) {
+		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+		brq->sbc.arg = brq->data.blocks |
+			(do_rel_wr ? (1 << 31) : 0) |
+			(do_data_tag ? (1 << 29) : 0);
+		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+		brq->mrq.sbc = &brq->sbc;
+	}
+
+	mmc_set_data_timeout(&brq->data, card);
+
+	brq->data.sg = mqrq->sg;
+	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+	/*
+	 * Adjust the sg list so it is the same size as the
+	 * request.
+	 */
+	if (brq->data.blocks != blk_rq_sectors(req)) {
+		int i, data_size = brq->data.blocks << 9;
+		struct scatterlist *sg;
+
+		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+			data_size -= sg->length;
+			if (data_size <= 0) {
+				sg->length += data_size;
+				i++;
+				break;
+			}
+		}
+		brq->data.sg_len = i;
+	}
+
+	mqrq->mmc_active.mrq = &brq->mrq;
+	mqrq->mmc_active.err_check = mmc_blk_err_check;
+
+	mmc_queue_bounce_pre(mqrq);
+}
+
+static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
+			   struct mmc_blk_request *brq, struct request *req,
+			   int ret)
+{
+	/*
+	 * If this is an SD card and we're writing, we can first
+	 * mark the known good sectors as ok.
+	 *
+	 * If the card is not SD, we can still ok written sectors
+	 * as reported by the controller (which might be less than
+	 * the real number of written sectors, but never more).
+	 */
+	if (mmc_card_sd(card)) {
+		u32 blocks;
+
+		blocks = mmc_sd_num_wr_blocks(card);
+		if (blocks != (u32)-1) {
+			spin_lock_irq(&md->lock);
+			ret = __blk_end_request(req, 0, blocks << 9);
+			spin_unlock_irq(&md->lock);
+		}
+	} else {
+		spin_lock_irq(&md->lock);
+		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
+		spin_unlock_irq(&md->lock);
+	}
+	return ret;
+}
+
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+{
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+	int ret = 1, disable_multi = 0, retry = 0, type;
+	enum mmc_blk_status status;
+	struct mmc_queue_req *mq_rq;
+	struct request *req;
+	struct mmc_async_req *areq;
+
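+	/* Nothing new to issue and nothing in flight from the previous call. */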
+	if (!rqc && !mq->mqrq_prev->req)
+		return 0;
+
+	do {
+		if (rqc) {
+			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+			areq = &mq->mqrq_cur->mmc_active;
+		} else
+			areq = NULL;
+		areq = mmc_start_req(card->host, areq, (int *) &status);
+		if (!areq)
+			return 0;
+
+		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+		brq = &mq_rq->brq;
+		req = mq_rq->req;
+		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+		mmc_queue_bounce_post(mq_rq);
+
+		switch (status) {
+		case MMC_BLK_SUCCESS:
+		case MMC_BLK_PARTIAL:
+			/*
+			 * A block was successfully transferred.
+			 */
+			mmc_blk_reset_success(md, type);
+			spin_lock_irq(&md->lock);
+			ret = __blk_end_request(req, 0,
+						brq->data.bytes_xfered);
+			spin_unlock_irq(&md->lock);
+			/*
+			 * If the blk_end_request function returns non-zero even
+			 * though all data has been transferred and no errors
+			 * were returned by the host controller, it's a bug.
+			 */
+			if (status == MMC_BLK_SUCCESS && ret) {
+				pr_err("%s BUG rq_tot %d d_xfer %d\n",
+				       __func__, blk_rq_bytes(req),
+				       brq->data.bytes_xfered);
+				rqc = NULL;
+				goto cmd_abort;
+			}
+			break;
+		case MMC_BLK_CMD_ERR:
+			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+			if (mmc_blk_reset(md, card->host, type))
+				goto cmd_abort;
+			if (!ret)
+				goto start_new_req;
+			break;
+		case MMC_BLK_RETRY:
+			if (retry++ < 5)
+				break;
+			/* Fall through */
+		case MMC_BLK_ABORT:
+			if (!mmc_blk_reset(md, card->host, type))
+				break;
+			goto cmd_abort;
+		case MMC_BLK_DATA_ERR: {
+			int err;
+
+			err = mmc_blk_reset(md, card->host, type);
+			if (!err)
+				break;
+			if (err == -ENODEV)
+				goto cmd_abort;
+			/* Fall through */
+		}
+		case MMC_BLK_ECC_ERR:
+			if (brq->data.blocks > 1) {
+				/* Redo read one sector at a time */
+				pr_warning("%s: retrying using single block read\n",
+					   req->rq_disk->disk_name);
+				disable_multi = 1;
+				break;
+			}
+			/*
+			 * After an error, we redo I/O one sector at a
+			 * time, so we only reach here after trying to
+			 * read a single sector.
+			 */
+			spin_lock_irq(&md->lock);
+			ret = __blk_end_request(req, -EIO,
+						brq->data.blksz);
+			spin_unlock_irq(&md->lock);
+			if (!ret)
+				goto start_new_req;
+			break;
+		case MMC_BLK_NOMEDIUM:
+			goto cmd_abort;
+		}
+
+		if (ret) {
+			/*
+			 * In case of an incomplete request
+			 * prepare it again and resend.
+			 */
+			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
+			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+		}
+	} while (ret);
+
+	return 1;
+
+ cmd_abort:
+	spin_lock_irq(&md->lock);
+	if (mmc_card_removed(card))
+		req->cmd_flags |= REQ_QUIET;
+	while (ret)
+		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
+	spin_unlock_irq(&md->lock);
+
+ start_new_req:
+	if (rqc) {
+		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+		mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+static int mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
+{
+	int err;
+
+	mmc_claim_host(card->host);
+	err = mmc_set_blocklen(card, 512);
+	mmc_release_host(card->host);
+
+	if (err) {
+		printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
+			md->disk->disk_name, err);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#endif
+
+static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+	int ret;
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
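+	/* With deferred resume, bring the bus back up on the first I/O request. */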
+	if (mmc_bus_needs_resume(card->host)) {
+		mmc_resume_bus(card->host);
+		mmc_blk_set_blksize(md, card);
+	}
+#endif
+
+	if (req && !mq->mqrq_prev->req)
+		/* claim host only for the first request */
+		mmc_claim_host(card->host);
+
+	ret = mmc_blk_part_switch(card, md);
+	if (ret) {
+		if (req) {
+			spin_lock_irq(&md->lock);
+			__blk_end_request_all(req, -EIO);
+			spin_unlock_irq(&md->lock);
+		}
+		ret = 0;
+		goto out;
+	}
+
+	if (req && req->cmd_flags & REQ_DISCARD) {
+		/* complete ongoing async transfer before issuing discard */
+		if (card->host->areq)
+			mmc_blk_issue_rw_rq(mq, NULL);
+		if (req->cmd_flags & REQ_SECURE &&
+			!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
+			ret = mmc_blk_issue_secdiscard_rq(mq, req);
+		else
+			ret = mmc_blk_issue_discard_rq(mq, req);
+	} else if (req && req->cmd_flags & REQ_FLUSH) {
+		/* complete ongoing async transfer before issuing flush */
+		if (card->host->areq)
+			mmc_blk_issue_rw_rq(mq, NULL);
+		ret = mmc_blk_issue_flush(mq, req);
+	} else {
+		ret = mmc_blk_issue_rw_rq(mq, req);
+	}
+
+out:
+	if (!req)
+		/* release host only when there are no more requests */
+		mmc_release_host(card->host);
+	return ret;
+}
+
+static inline int mmc_blk_readonly(struct mmc_card *card)
+{
+	return mmc_card_readonly(card) ||
+	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
+}
+
+static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+					      struct device *parent,
+					      sector_t size,
+					      bool default_ro,
+					      const char *subname,
+					      int area_type)
+{
+	struct mmc_blk_data *md;
+	int devidx, ret;
+
+	devidx = find_first_zero_bit(dev_use, max_devices);
+	if (devidx >= max_devices)
+		return ERR_PTR(-ENOSPC);
+	__set_bit(devidx, dev_use);
+
+	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
+	if (!md) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * !subname implies we are creating main mmc_blk_data that will be
+	 * associated with mmc_card with mmc_set_drvdata. Due to device
+	 * partitions, devidx will not coincide with a per-physical card
+	 * index anymore so we keep track of a name index.
+	 */
+	if (!subname) {
+		md->name_idx = find_first_zero_bit(name_use, max_devices);
+		__set_bit(md->name_idx, name_use);
+	} else
+		md->name_idx = ((struct mmc_blk_data *)
+				dev_to_disk(parent)->private_data)->name_idx;
+
+	md->area_type = area_type;
+
+	/*
+	 * Set the read-only status based on the supported commands
+	 * and the write protect switch.
+	 */
+	md->read_only = mmc_blk_readonly(card);
+
+	md->disk = alloc_disk(perdev_minors);
+	if (md->disk == NULL) {
+		ret = -ENOMEM;
+		goto err_kfree;
+	}
+
+	spin_lock_init(&md->lock);
+	INIT_LIST_HEAD(&md->part);
+	md->usage = 1;
+
+	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
+	if (ret)
+		goto err_putdisk;
+
+	md->queue.issue_fn = mmc_blk_issue_rq;
+	md->queue.data = md;
+
+	md->disk->major	= MMC_BLOCK_MAJOR;
+	md->disk->first_minor = devidx * perdev_minors;
+	md->disk->fops = &mmc_bdops;
+	md->disk->private_data = md;
+	md->disk->queue = md->queue.queue;
+	md->disk->driverfs_dev = parent;
+	set_disk_ro(md->disk, md->read_only || default_ro);
+	md->disk->flags = GENHD_FL_EXT_DEVT;
+
+	/*
+	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
+	 *
+	 * - be set for removable media with permanent block devices
+	 * - be unset for removable block devices with permanent media
+	 *
+	 * Since MMC block devices clearly fall under the second
+	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
+	 * should use the block device creation/destruction hotplug
+	 * messages to tell when the card is present.
+	 */
+
+	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
+		 "mmcblk%d%s", md->name_idx, subname ? subname : "");
+
+	blk_queue_logical_block_size(md->queue.queue, 512);
+	set_capacity(md->disk, size);
+
+	if (mmc_host_cmd23(card->host)) {
+		if (mmc_card_mmc(card) ||
+		    (mmc_card_sd(card) &&
+		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+			md->flags |= MMC_BLK_CMD23;
+	}
+
+	if (mmc_card_mmc(card) &&
+	    md->flags & MMC_BLK_CMD23 &&
+	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+	     card->ext_csd.rel_sectors)) {
+		md->flags |= MMC_BLK_REL_WR;
+		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+	}
+
+	return md;
+
+ err_putdisk:
+	put_disk(md->disk);
+ err_kfree:
+	kfree(md);
+ out:
+	return ERR_PTR(ret);
+}
+
+static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+{
+	sector_t size;
+	struct mmc_blk_data *md;
+
+	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
+		/*
+		 * The EXT_CSD sector count is in number of 512 byte
+		 * sectors.
+		 */
+		size = card->ext_csd.sectors;
+	} else {
+		/*
+		 * The CSD capacity field is in units of read_blkbits.
+		 * set_capacity takes units of 512 bytes.
+		 */
+		size = (sector_t)card->csd.capacity << (card->csd.read_blkbits - 9);
+	}
+
+	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
+					MMC_BLK_DATA_AREA_MAIN);
+	return md;
+}
+
+static int mmc_blk_alloc_part(struct mmc_card *card,
+			      struct mmc_blk_data *md,
+			      unsigned int part_type,
+			      sector_t size,
+			      bool default_ro,
+			      const char *subname,
+			      int area_type)
+{
+	char cap_str[10];
+	struct mmc_blk_data *part_md;
+
+	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
+				    subname, area_type);
+	if (IS_ERR(part_md))
+		return PTR_ERR(part_md);
+	part_md->part_type = part_type;
+	list_add(&part_md->part, &md->part);
+
+	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
+			cap_str, sizeof(cap_str));
+	pr_info("%s: %s %s partition %u %s\n",
+	       part_md->disk->disk_name, mmc_card_id(card),
+	       mmc_card_name(card), part_md->part_type, cap_str);
+	return 0;
+}
+
+/* MMC Physical partitions consist of two boot partitions and
+ * up to four general purpose partitions.
+ * For each partition enabled in EXT_CSD a block device will be allocated
+ * to provide access to the partition.
+ */
+
+static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
+{
+	int idx, ret = 0;
+
+	if (!mmc_card_mmc(card))
+		return 0;
+
+	for (idx = 0; idx < card->nr_parts; idx++) {
+		if (card->part[idx].size) {
+			ret = mmc_blk_alloc_part(card, md,
+				card->part[idx].part_cfg,
+				card->part[idx].size >> 9,
+				card->part[idx].force_ro,
+				card->part[idx].name,
+				card->part[idx].area_type);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return ret;
+}
+
+static void mmc_blk_remove_req(struct mmc_blk_data *md)
+{
+	struct mmc_card *card;
+
+	if (md) {
+		card = md->queue.card;
+		if (md->disk->flags & GENHD_FL_UP) {
+			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+					card->ext_csd.boot_ro_lockable)
+				device_remove_file(disk_to_dev(md->disk),
+					&md->power_ro_lock);
+
+			/* Stop new requests from getting into the queue */
+			del_gendisk(md->disk);
+		}
+
+		/* Then flush out any already in there */
+		mmc_cleanup_queue(&md->queue);
+		mmc_blk_put(md);
+	}
+}
+
+static void mmc_blk_remove_parts(struct mmc_card *card,
+				 struct mmc_blk_data *md)
+{
+	struct list_head *pos, *q;
+	struct mmc_blk_data *part_md;
+
+	__clear_bit(md->name_idx, name_use);
+	list_for_each_safe(pos, q, &md->part) {
+		part_md = list_entry(pos, struct mmc_blk_data, part);
+		list_del(pos);
+		mmc_blk_remove_req(part_md);
+	}
+}
+
+static int mmc_add_disk(struct mmc_blk_data *md)
+{
+	int ret;
+	struct mmc_card *card = md->queue.card;
+
+	add_disk(md->disk);
+	md->force_ro.show = force_ro_show;
+	md->force_ro.store = force_ro_store;
+	sysfs_attr_init(&md->force_ro.attr);
+	md->force_ro.attr.name = "force_ro";
+	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
+	if (ret)
+		goto force_ro_fail;
+
+	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+	     card->ext_csd.boot_ro_lockable) {
+		umode_t mode;
+
+		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
+			mode = S_IRUGO;
+		else
+			mode = S_IRUGO | S_IWUSR;
+
+		md->power_ro_lock.show = power_ro_lock_show;
+		md->power_ro_lock.store = power_ro_lock_store;
+		sysfs_attr_init(&md->power_ro_lock.attr);
+		md->power_ro_lock.attr.mode = mode;
+		md->power_ro_lock.attr.name =
+					"ro_lock_until_next_power_on";
+		ret = device_create_file(disk_to_dev(md->disk),
+				&md->power_ro_lock);
+		if (ret)
+			goto power_ro_lock_fail;
+	}
+	return ret;
+
+power_ro_lock_fail:
+	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+force_ro_fail:
+	del_gendisk(md->disk);
+
+	return ret;
+}
+
+#define CID_MANFID_SANDISK	0x2
+#define CID_MANFID_TOSHIBA	0x11
+#define CID_MANFID_MICRON	0x13
+#define CID_MANFID_SAMSUNG	0x15
+
+static const struct mmc_fixup blk_fixups[] =
+{
+	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
+		  MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
+		  MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
+		  MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
+		  MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
+		  MMC_QUIRK_INAND_CMD38),
+
+	/*
+	 * Some MMC cards experience performance degradation with CMD23
+	 * instead of CMD12-bounded multiblock transfers. For now we'll
+	 * black list what's bad...
+	 * - Certain Toshiba cards.
+	 *
+	 * N.B. This doesn't affect SD cards.
+	 */
+	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+
+	/*
+	 * Some Micron MMC cards need a longer data read timeout than
+	 * indicated in CSD.
+	 */
+	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
+		  MMC_QUIRK_LONG_READ_TIME),
+
+	/*
+	 * On these Samsung MoviNAND parts, performing secure erase or
+	 * secure trim can result in unrecoverable corruption due to a
+	 * firmware bug.
+	 */
+	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+
+	END_FIXUP
+};
+
+static int mmc_blk_probe(struct mmc_card *card)
+{
+	struct mmc_blk_data *md, *part_md;
+	char cap_str[10];
+
+	/*
+	 * Check that the card supports the command class(es) we need.
+	 */
+	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
+		return -ENODEV;
+
+	md = mmc_blk_alloc(card);
+	if (IS_ERR(md))
+		return PTR_ERR(md);
+
+	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
+			cap_str, sizeof(cap_str));
+	pr_info("%s: %s %s %s %s\n",
+		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
+		cap_str, md->read_only ? "(ro)" : "");
+
+	if (mmc_blk_alloc_parts(card, md))
+		goto out;
+
+	mmc_set_drvdata(card, md);
+	mmc_fixup_device(card, blk_fixups);
+
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 1);
+#endif
+	if (mmc_add_disk(md))
+		goto out;
+
+	list_for_each_entry(part_md, &md->part, part) {
+		if (mmc_add_disk(part_md))
+			goto out;
+	}
+	return 0;
+
+ out:
+	mmc_blk_remove_parts(card, md);
+	mmc_blk_remove_req(md);
+	return 0;
+}
+
+static void mmc_blk_remove(struct mmc_card *card)
+{
+	struct mmc_blk_data *md = mmc_get_drvdata(card);
+
+	if (!md)
+		return;
+
+	mmc_blk_remove_parts(card, md);
+	mmc_claim_host(card->host);
+	mmc_blk_part_switch(card, md);
+	mmc_release_host(card->host);
+	mmc_blk_remove_req(md);
+	mmc_set_drvdata(card, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 0);
+#endif
+}
+
+#ifdef CONFIG_PM
+static int mmc_blk_suspend(struct mmc_card *card)
+{
+	struct mmc_blk_data *part_md;
+	struct mmc_blk_data *md = mmc_get_drvdata(card);
+
+	if (md) {
+		mmc_queue_suspend(&md->queue);
+		list_for_each_entry(part_md, &md->part, part) {
+			mmc_queue_suspend(&part_md->queue);
+		}
+	}
+	return 0;
+}
+
+static int mmc_blk_resume(struct mmc_card *card)
+{
+	struct mmc_blk_data *part_md;
+	struct mmc_blk_data *md = mmc_get_drvdata(card);
+
+	if (md) {
+		/*
+		 * Resume involves the card going into idle state,
+		 * so current partition is always the main one.
+		 */
+		md->part_curr = md->part_type;
+		mmc_queue_resume(&md->queue);
+		list_for_each_entry(part_md, &md->part, part) {
+			mmc_queue_resume(&part_md->queue);
+		}
+	}
+	return 0;
+}
+#else
+#define	mmc_blk_suspend	NULL
+#define mmc_blk_resume	NULL
+#endif
+
+static struct mmc_driver mmc_driver = {
+	.drv		= {
+		.name	= "mmcblk",
+	},
+	.probe		= mmc_blk_probe,
+	.remove		= mmc_blk_remove,
+	.suspend	= mmc_blk_suspend,
+	.resume		= mmc_blk_resume,
+};
+
+static int __init mmc_blk_init(void)
+{
+	int res;
+
+	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
+		pr_info("mmcblk: using %d minors per device\n", perdev_minors);
+
+	max_devices = 256 / perdev_minors;
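+	/*
+	 * Worked example: with the default of 8 minors per device,
+	 * max_devices = 256 / 8 = 32 cards, each getting one whole-disk
+	 * node plus up to 7 partition nodes.
+	 */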
+
+	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
+	if (res)
+		goto out;
+
+	res = mmc_register_driver(&mmc_driver);
+	if (res)
+		goto out2;
+
+	return 0;
+ out2:
+	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+ out:
+	return res;
+}
+
+static void __exit mmc_blk_exit(void)
+{
+	mmc_unregister_driver(&mmc_driver);
+	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+}
+
+module_init(mmc_blk_init);
+module_exit(mmc_blk_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/mmc/card/mmc_test.c b/ap/os/linux/linux-3.4.x/drivers/mmc/card/mmc_test.c
new file mode 100644
index 0000000..759714e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/mmc/card/mmc_test.c
@@ -0,0 +1,3055 @@
+/*
+ *  linux/drivers/mmc/card/mmc_test.c
+ *
+ *  Copyright 2007-2008 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/slab.h>
+
+#include <linux/scatterlist.h>
+#include <linux/swap.h>		/* For nr_free_buffer_pages() */
+#include <linux/list.h>
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+
+#define RESULT_OK		0
+#define RESULT_FAIL		1
+#define RESULT_UNSUP_HOST	2
+#define RESULT_UNSUP_CARD	3
+
+#define BUFFER_ORDER		2
+#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)
+
+/*
+ * Limit the test area size to the maximum MMC HC erase group size.  Note that
+ * the maximum SD allocation unit size is just 4MiB.
+ */
+#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
+
+/**
+ * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
+ * @page: first page in the allocation
+ * @order: order of the number of pages allocated
+ */
+struct mmc_test_pages {
+	struct page *page;
+	unsigned int order;
+};
+
+/**
+ * struct mmc_test_mem - allocated memory.
+ * @arr: array of allocations
+ * @cnt: number of allocations
+ */
+struct mmc_test_mem {
+	struct mmc_test_pages *arr;
+	unsigned int cnt;
+};
+
+/**
+ * struct mmc_test_area - information for performance tests.
+ * @max_sz: test area size (in bytes)
+ * @dev_addr: address on card at which to do performance tests
+ * @max_tfr: maximum transfer size allowed by driver (in bytes)
+ * @max_segs: maximum segments allowed by driver in scatterlist @sg
+ * @max_seg_sz: maximum segment size allowed by driver
+ * @blocks: number of (512 byte) blocks currently mapped by @sg
+ * @sg_len: length of currently mapped scatterlist @sg
+ * @mem: allocated memory
+ * @sg: scatterlist
+ */
+struct mmc_test_area {
+	unsigned long max_sz;
+	unsigned int dev_addr;
+	unsigned int max_tfr;
+	unsigned int max_segs;
+	unsigned int max_seg_sz;
+	unsigned int blocks;
+	unsigned int sg_len;
+	struct mmc_test_mem *mem;
+	struct scatterlist *sg;
+};
+
+/**
+ * struct mmc_test_transfer_result - transfer results for performance tests.
+ * @link: double-linked list
+ * @count: number of sector groups transferred
+ * @sectors: number of sectors per group
+ * @ts: time values of transfer
+ * @rate: calculated transfer rate
+ * @iops: I/O operations per second (times 100)
+ */
+struct mmc_test_transfer_result {
+	struct list_head link;
+	unsigned int count;
+	unsigned int sectors;
+	struct timespec ts;
+	unsigned int rate;
+	unsigned int iops;
+};
+
+/**
+ * struct mmc_test_general_result - results for tests.
+ * @link: double-linked list
+ * @card: card under test
+ * @testcase: number of test case
+ * @result: result of test run
+ * @tr_lst: transfer measurements if any as mmc_test_transfer_result
+ */
+struct mmc_test_general_result {
+	struct list_head link;
+	struct mmc_card *card;
+	int testcase;
+	int result;
+	struct list_head tr_lst;
+};
+
+/**
+ * struct mmc_test_dbgfs_file - debugfs related file.
+ * @link: double-linked list
+ * @card: card under test
+ * @file: file created under debugfs
+ */
+struct mmc_test_dbgfs_file {
+	struct list_head link;
+	struct mmc_card *card;
+	struct dentry *file;
+};
+
+/**
+ * struct mmc_test_card - test information.
+ * @card: card under test
+ * @scratch: transfer buffer
+ * @buffer: transfer buffer
+ * @highmem: buffer for highmem tests
+ * @area: information for performance tests
+ * @gr: pointer to results of current testcase
+ */
+struct mmc_test_card {
+	struct mmc_card	*card;
+
+	u8		scratch[BUFFER_SIZE];
+	u8		*buffer;
+#ifdef CONFIG_HIGHMEM
+	struct page	*highmem;
+#endif
+	struct mmc_test_area		area;
+	struct mmc_test_general_result	*gr;
+};
+
+enum mmc_test_prep_media {
+	MMC_TEST_PREP_NONE = 0,
+	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
+	MMC_TEST_PREP_ERASE = 1 << 1,
+};
+
+struct mmc_test_multiple_rw {
+	unsigned int *sg_len;
+	unsigned int *bs;
+	unsigned int len;
+	unsigned int size;
+	bool do_write;
+	bool do_nonblock_req;
+	enum mmc_test_prep_media prepare;
+};
+
+struct mmc_test_async_req {
+	struct mmc_async_req areq;
+	struct mmc_test_card *test;
+};
+
+/*******************************************************************/
+/*  General helper functions                                       */
+/*******************************************************************/
+
+/*
+ * Configure correct block size in card
+ */
+static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
+{
+	return mmc_set_blocklen(test->card, size);
+}
+
+/*
+ * Fill in the mmc_request structure given a set of transfer parameters.
+ */
+static void mmc_test_prepare_mrq(struct mmc_test_card *test,
+	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
+	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
+{
+	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
+
+	if (blocks > 1) {
+		mrq->cmd->opcode = write ?
+			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
+	} else {
+		mrq->cmd->opcode = write ?
+			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
+	}
+
+	mrq->cmd->arg = dev_addr;
+	if (!mmc_card_blockaddr(test->card))
+		mrq->cmd->arg <<= 9;
+
+	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	if (blocks == 1)
+		mrq->stop = NULL;
+	else {
+		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
+		mrq->stop->arg = 0;
+		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
+	}
+
+	mrq->data->blksz = blksz;
+	mrq->data->blocks = blocks;
+	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+	mrq->data->sg = sg;
+	mrq->data->sg_len = sg_len;
+
+	mmc_set_data_timeout(mrq->data, test->card);
+}
+
+static int mmc_test_busy(struct mmc_command *cmd)
+{
+	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
+		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
+}
+
+/*
+ * Wait for the card to finish the busy state
+ */
+static int mmc_test_wait_busy(struct mmc_test_card *test)
+{
+	int ret, busy;
+	struct mmc_command cmd = {0};
+
+	busy = 0;
+	do {
+		memset(&cmd, 0, sizeof(struct mmc_command));
+
+		cmd.opcode = MMC_SEND_STATUS;
+		cmd.arg = test->card->rca << 16;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
+		if (ret)
+			break;
+
+		if (!busy && mmc_test_busy(&cmd)) {
+			busy = 1;
+			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+				pr_info("%s: Warning: Host did not "
+					"wait for busy state to end.\n",
+					mmc_hostname(test->card->host));
+		}
+	} while (mmc_test_busy(&cmd));
+
+	return ret;
+}
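+
+/*
+ * Usage sketch: callers issue a data transfer and then poll the card
+ * with CMD13 (SEND_STATUS) until it leaves the programming state, e.g.:
+ *
+ *	mmc_wait_for_req(test->card->host, &mrq);
+ *	ret = mmc_test_wait_busy(test);	(returns 0 once R1 reports ready)
+ */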
+
+/*
+ * Transfer a single sector of kernel addressable data
+ */
+static int mmc_test_buffer_transfer(struct mmc_test_card *test,
+	u8 *buffer, unsigned addr, unsigned blksz, int write)
+{
+	int ret;
+
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_command stop = {0};
+	struct mmc_data data = {0};
+
+	struct scatterlist sg;
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+	mrq.stop = &stop;
+
+	sg_init_one(&sg, buffer, blksz);
+
+	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
+
+	mmc_wait_for_req(test->card->host, &mrq);
+
+	if (cmd.error)
+		return cmd.error;
+	if (data.error)
+		return data.error;
+
+	ret = mmc_test_wait_busy(test);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void mmc_test_free_mem(struct mmc_test_mem *mem)
+{
+	if (!mem)
+		return;
+	while (mem->cnt--)
+		__free_pages(mem->arr[mem->cnt].page,
+			     mem->arr[mem->cnt].order);
+	kfree(mem->arr);
+	kfree(mem);
+}
+
+/*
+ * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
+ * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
+ * not exceed a maximum number of segments and try not to make segments much
+ * bigger than maximum segment size.
+ */
+static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
+					       unsigned long max_sz,
+					       unsigned int max_segs,
+					       unsigned int max_seg_sz)
+{
+	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
+	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
+	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
+	unsigned long page_cnt = 0;
+	unsigned long limit = nr_free_buffer_pages() >> 4;
+	struct mmc_test_mem *mem;
+
+	if (max_page_cnt > limit)
+		max_page_cnt = limit;
+	if (min_page_cnt > max_page_cnt)
+		min_page_cnt = max_page_cnt;
+
+	if (max_seg_page_cnt > max_page_cnt)
+		max_seg_page_cnt = max_page_cnt;
+
+	if (max_segs > max_page_cnt)
+		max_segs = max_page_cnt;
+
+	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
+	if (!mem)
+		return NULL;
+
+	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
+			   GFP_KERNEL);
+	if (!mem->arr)
+		goto out_free;
+
+	while (max_page_cnt) {
+		struct page *page;
+		unsigned int order;
+		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
+				__GFP_NORETRY;
+
+		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
+		while (1) {
+			page = alloc_pages(flags, order);
+			if (page || !order)
+				break;
+			order -= 1;
+		}
+		if (!page) {
+			if (page_cnt < min_page_cnt)
+				goto out_free;
+			break;
+		}
+		mem->arr[mem->cnt].page = page;
+		mem->arr[mem->cnt].order = order;
+		mem->cnt += 1;
+		if (max_page_cnt <= (1UL << order))
+			break;
+		max_page_cnt -= 1UL << order;
+		page_cnt += 1UL << order;
+		if (mem->cnt >= max_segs) {
+			if (page_cnt < min_page_cnt)
+				goto out_free;
+			break;
+		}
+	}
+
+	return mem;
+
+out_free:
+	mmc_test_free_mem(mem);
+	return NULL;
+}
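+
+/*
+ * Example (illustrative numbers): asking for min_sz = 64 KiB and
+ * max_sz = 4 MiB with max_segs = 128 and max_seg_sz = 64 KiB yields up
+ * to 128 allocations of at most 16 pages (order 4) each; when
+ * high-order pages are scarce the order is lowered page by page, and
+ * the whole allocation fails only if less than min_sz is obtained.
+ */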
+
+/*
+ * Map memory into a scatterlist.  Optionally allow the same memory to be
+ * mapped more than once.
+ */
+static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
+			   struct scatterlist *sglist, int repeat,
+			   unsigned int max_segs, unsigned int max_seg_sz,
+			   unsigned int *sg_len, int min_sg_len)
+{
+	struct scatterlist *sg = NULL;
+	unsigned int i;
+	unsigned long sz = size;
+
+	sg_init_table(sglist, max_segs);
+	if (min_sg_len > max_segs)
+		min_sg_len = max_segs;
+
+	*sg_len = 0;
+	do {
+		for (i = 0; i < mem->cnt; i++) {
+			unsigned long len = PAGE_SIZE << mem->arr[i].order;
+
+			if (min_sg_len && (size / min_sg_len < len))
+				len = ALIGN(size / min_sg_len, 512);
+			if (len > sz)
+				len = sz;
+			if (len > max_seg_sz)
+				len = max_seg_sz;
+			if (sg)
+				sg = sg_next(sg);
+			else
+				sg = sglist;
+			if (!sg)
+				return -EINVAL;
+			sg_set_page(sg, mem->arr[i].page, len, 0);
+			sz -= len;
+			*sg_len += 1;
+			if (!sz)
+				break;
+		}
+	} while (sz && repeat);
+
+	if (sz)
+		return -EINVAL;
+
+	if (sg)
+		sg_mark_end(sg);
+
+	return 0;
+}
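+
+/*
+ * Example: with 256 KiB of allocated memory, repeat = 1 and enough
+ * segments available, mapping size = 1 MiB simply walks the allocation
+ * four times, so the same pages are transferred repeatedly; this is how
+ * transfers larger than the allocated buffer are exercised.
+ */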
+
+/*
+ * Map memory into a scatterlist so that no pages are contiguous.  Allow the
+ * same memory to be mapped more than once.
+ */
+static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
+				       unsigned long sz,
+				       struct scatterlist *sglist,
+				       unsigned int max_segs,
+				       unsigned int max_seg_sz,
+				       unsigned int *sg_len)
+{
+	struct scatterlist *sg = NULL;
+	unsigned int i = mem->cnt, cnt;
+	unsigned long len;
+	void *base, *addr, *last_addr = NULL;
+
+	sg_init_table(sglist, max_segs);
+
+	*sg_len = 0;
+	while (sz) {
+		base = page_address(mem->arr[--i].page);
+		cnt = 1 << mem->arr[i].order;
+		while (sz && cnt) {
+			addr = base + PAGE_SIZE * --cnt;
+			if (last_addr && last_addr + PAGE_SIZE == addr)
+				continue;
+			last_addr = addr;
+			len = PAGE_SIZE;
+			if (len > max_seg_sz)
+				len = max_seg_sz;
+			if (len > sz)
+				len = sz;
+			if (sg)
+				sg = sg_next(sg);
+			else
+				sg = sglist;
+			if (!sg)
+				return -EINVAL;
+			sg_set_page(sg, virt_to_page(addr), len, 0);
+			sz -= len;
+			*sg_len += 1;
+		}
+		if (i == 0)
+			i = mem->cnt;
+	}
+
+	if (sg)
+		sg_mark_end(sg);
+
+	return 0;
+}
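+
+/*
+ * Note: the pages are walked from the back of each allocation and a
+ * page physically adjacent to the previously mapped one is skipped, so
+ * the resulting scatterlist has no mergeable entries - roughly the
+ * worst case for host DMA.
+ */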
+
+/*
+ * Calculate transfer rate in bytes per second.
+ */
+static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
+{
+	uint64_t ns;
+
+	ns = ts->tv_sec;
+	ns *= 1000000000;
+	ns += ts->tv_nsec;
+
+	bytes *= 1000000000;
+
+	while (ns > UINT_MAX) {
+		bytes >>= 1;
+		ns >>= 1;
+	}
+
+	if (!ns)
+		return 0;
+
+	do_div(bytes, (uint32_t)ns);
+
+	return bytes;
+}
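+
+/*
+ * Worked example: 1 MiB in 0.5 s gives
+ * (1048576 * 10^9) / (5 * 10^8 ns) = 2097152 bytes/s, i.e. 2 MiB/s.
+ * The halving loop only keeps ns within 32 bits for do_div(); it scales
+ * numerator and denominator together, so the ratio is preserved.
+ */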
+
+/*
+ * Save transfer results for future usage
+ */
+static void mmc_test_save_transfer_result(struct mmc_test_card *test,
+	unsigned int count, unsigned int sectors, struct timespec ts,
+	unsigned int rate, unsigned int iops)
+{
+	struct mmc_test_transfer_result *tr;
+
+	if (!test->gr)
+		return;
+
+	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
+	if (!tr)
+		return;
+
+	tr->count = count;
+	tr->sectors = sectors;
+	tr->ts = ts;
+	tr->rate = rate;
+	tr->iops = iops;
+
+	list_add_tail(&tr->link, &test->gr->tr_lst);
+}
+
+/*
+ * Print the transfer rate.
+ */
+static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
+				struct timespec *ts1, struct timespec *ts2)
+{
+	unsigned int rate, iops, sectors = bytes >> 9;
+	struct timespec ts;
+
+	ts = timespec_sub(*ts2, *ts1);
+
+	rate = mmc_test_rate(bytes, &ts);
+	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
+
+	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
+			 mmc_hostname(test->card->host), sectors, sectors >> 1,
+			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
+			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
+			 iops / 100, iops % 100);
+
+	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
+}
+
+/*
+ * Print the average transfer rate.
+ */
+static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
+				    unsigned int count, struct timespec *ts1,
+				    struct timespec *ts2)
+{
+	unsigned int rate, iops, sectors = bytes >> 9;
+	uint64_t tot = bytes * count;
+	struct timespec ts;
+
+	ts = timespec_sub(*ts2, *ts1);
+
+	rate = mmc_test_rate(tot, &ts);
+	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
+
+	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
+			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
+			 "%u.%02u IOPS, sg_len %d)\n",
+			 mmc_hostname(test->card->host), count, sectors, count,
+			 sectors >> 1, (sectors & 1 ? ".5" : ""),
+			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
+			 rate / 1000, rate / 1024, iops / 100, iops % 100,
+			 test->area.sg_len);
+
+	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
+}
+
+/*
+ * Return the card size in sectors.
+ */
+static unsigned int mmc_test_capacity(struct mmc_card *card)
+{
+	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
+		return card->ext_csd.sectors;
+	else
+		return card->csd.capacity << (card->csd.read_blkbits - 9);
+}
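+
+/*
+ * Example (illustrative numbers): a byte-addressed card with
+ * read_blkbits = 10 and csd.capacity = 1048576 reports
+ * 1048576 << (10 - 9) = 2097152 sectors of 512 bytes, i.e. 1 GiB.
+ */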
+
+/*******************************************************************/
+/*  Test preparation and cleanup                                   */
+/*******************************************************************/
+
+/*
+ * Fill the first sectors of the card (one BUFFER_SIZE worth) with known
+ * data so that bad reads/writes can be detected
+ */
+static int __mmc_test_prepare(struct mmc_test_card *test, int write)
+{
+	int ret, i;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	if (write)
+		memset(test->buffer, 0xDF, 512);
+	else {
+		for (i = 0; i < 512; i++)
+			test->buffer[i] = i;
+	}
+
+	for (i = 0; i < BUFFER_SIZE / 512; i++) {
+		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_prepare_write(struct mmc_test_card *test)
+{
+	return __mmc_test_prepare(test, 1);
+}
+
+static int mmc_test_prepare_read(struct mmc_test_card *test)
+{
+	return __mmc_test_prepare(test, 0);
+}
+
+static int mmc_test_cleanup(struct mmc_test_card *test)
+{
+	int ret, i;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	memset(test->buffer, 0, 512);
+
+	for (i = 0; i < BUFFER_SIZE / 512; i++) {
+		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*******************************************************************/
+/*  Test execution helpers                                         */
+/*******************************************************************/
+
+/*
+ * Modifies the mmc_request to perform the "short transfer" tests
+ */
+static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
+	struct mmc_request *mrq, int write)
+{
+	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+
+	if (mrq->data->blocks > 1) {
+		mrq->cmd->opcode = write ?
+			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
+		mrq->stop = NULL;
+	} else {
+		mrq->cmd->opcode = MMC_SEND_STATUS;
+		mrq->cmd->arg = test->card->rca << 16;
+	}
+}
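+
+/*
+ * For a multi-block request the command is downgraded to a single-block
+ * opcode, so the card stops after blksz bytes while the host still
+ * expects blocks * blksz; the transfer should end in a data timeout,
+ * which mmc_test_check_broken_result() below treats as the expected
+ * outcome.
+ */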
+
+/*
+ * Checks that a normal transfer didn't have any errors
+ */
+static int mmc_test_check_result(struct mmc_test_card *test,
+				 struct mmc_request *mrq)
+{
+	int ret;
+
+	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+
+	ret = 0;
+
+	if (!ret && mrq->cmd->error)
+		ret = mrq->cmd->error;
+	if (!ret && mrq->data->error)
+		ret = mrq->data->error;
+	if (!ret && mrq->stop && mrq->stop->error)
+		ret = mrq->stop->error;
+	if (!ret && mrq->data->bytes_xfered !=
+		mrq->data->blocks * mrq->data->blksz)
+		ret = RESULT_FAIL;
+
+	if (ret == -EINVAL)
+		ret = RESULT_UNSUP_HOST;
+
+	return ret;
+}
+
+static int mmc_test_check_result_async(struct mmc_card *card,
+				       struct mmc_async_req *areq)
+{
+	struct mmc_test_async_req *test_async =
+		container_of(areq, struct mmc_test_async_req, areq);
+
+	mmc_test_wait_busy(test_async->test);
+
+	return mmc_test_check_result(test_async->test, areq->mrq);
+}
+
+/*
+ * Checks that a "short transfer" behaved as expected
+ */
+static int mmc_test_check_broken_result(struct mmc_test_card *test,
+	struct mmc_request *mrq)
+{
+	int ret;
+
+	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+
+	ret = 0;
+
+	if (!ret && mrq->cmd->error)
+		ret = mrq->cmd->error;
+	if (!ret && mrq->data->error == 0)
+		ret = RESULT_FAIL;
+	if (!ret && mrq->data->error != -ETIMEDOUT)
+		ret = mrq->data->error;
+	if (!ret && mrq->stop && mrq->stop->error)
+		ret = mrq->stop->error;
+	if (mrq->data->blocks > 1) {
+		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
+			ret = RESULT_FAIL;
+	} else {
+		if (!ret && mrq->data->bytes_xfered > 0)
+			ret = RESULT_FAIL;
+	}
+
+	if (ret == -EINVAL)
+		ret = RESULT_UNSUP_HOST;
+
+	return ret;
+}
+
+/*
+ * Reset a request and its command/data structures so they can be
+ * reused for the next leg of a nonblocking transfer.
+ */
+static void mmc_test_nonblock_reset(struct mmc_request *mrq,
+				    struct mmc_command *cmd,
+				    struct mmc_command *stop,
+				    struct mmc_data *data)
+{
+	memset(mrq, 0, sizeof(struct mmc_request));
+	memset(cmd, 0, sizeof(struct mmc_command));
+	memset(data, 0, sizeof(struct mmc_data));
+	memset(stop, 0, sizeof(struct mmc_command));
+
+	mrq->cmd = cmd;
+	mrq->data = data;
+	mrq->stop = stop;
+}
+
+static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
+				      struct scatterlist *sg, unsigned sg_len,
+				      unsigned dev_addr, unsigned blocks,
+				      unsigned blksz, int write, int count)
+{
+	struct mmc_request mrq1;
+	struct mmc_command cmd1;
+	struct mmc_command stop1;
+	struct mmc_data data1;
+
+	struct mmc_request mrq2;
+	struct mmc_command cmd2;
+	struct mmc_command stop2;
+	struct mmc_data data2;
+
+	struct mmc_test_async_req test_areq[2];
+	struct mmc_async_req *done_areq;
+	struct mmc_async_req *cur_areq = &test_areq[0].areq;
+	struct mmc_async_req *other_areq = &test_areq[1].areq;
+	int i;
+	int ret;
+
+	test_areq[0].test = test;
+	test_areq[1].test = test;
+
+	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
+	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
+
+	cur_areq->mrq = &mrq1;
+	cur_areq->err_check = mmc_test_check_result_async;
+	other_areq->mrq = &mrq2;
+	other_areq->err_check = mmc_test_check_result_async;
+
+	for (i = 0; i < count; i++) {
+		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
+				     blocks, blksz, write);
+		done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
+
+		if (ret || (!done_areq && i > 0))
+			goto err;
+
+		if (done_areq) {
+			if (done_areq->mrq == &mrq2)
+				mmc_test_nonblock_reset(&mrq2, &cmd2,
+							&stop2, &data2);
+			else
+				mmc_test_nonblock_reset(&mrq1, &cmd1,
+							&stop1, &data1);
+		}
+		done_areq = cur_areq;
+		cur_areq = other_areq;
+		other_areq = done_areq;
+		dev_addr += blocks;
+	}
+
+	done_areq = mmc_start_req(test->card->host, NULL, &ret);
+
+	return ret;
+err:
+	return ret;
+}
+
+/*
+ * Tests a basic transfer with certain parameters
+ */
+static int mmc_test_simple_transfer(struct mmc_test_card *test,
+	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
+	unsigned blocks, unsigned blksz, int write)
+{
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_command stop = {0};
+	struct mmc_data data = {0};
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+	mrq.stop = &stop;
+
+	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
+		blocks, blksz, write);
+
+	mmc_wait_for_req(test->card->host, &mrq);
+
+	mmc_test_wait_busy(test);
+
+	return mmc_test_check_result(test, &mrq);
+}
+
+/*
+ * Tests a transfer where the card will fail completely or partly
+ */
+static int mmc_test_broken_transfer(struct mmc_test_card *test,
+	unsigned blocks, unsigned blksz, int write)
+{
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_command stop = {0};
+	struct mmc_data data = {0};
+
+	struct scatterlist sg;
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+	mrq.stop = &stop;
+
+	sg_init_one(&sg, test->buffer, blocks * blksz);
+
+	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
+	mmc_test_prepare_broken_mrq(test, &mrq, write);
+
+	mmc_wait_for_req(test->card->host, &mrq);
+
+	mmc_test_wait_busy(test);
+
+	return mmc_test_check_broken_result(test, &mrq);
+}
+
+/*
+ * Does a complete transfer test where data is also validated
+ *
+ * Note: mmc_test_prepare() must have been done before this call
+ */
+static int mmc_test_transfer(struct mmc_test_card *test,
+	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
+	unsigned blocks, unsigned blksz, int write)
+{
+	int ret, i;
+	unsigned long flags;
+
+	if (write) {
+		for (i = 0; i < blocks * blksz; i++)
+			test->scratch[i] = i;
+	} else {
+		memset(test->scratch, 0, BUFFER_SIZE);
+	}
+	local_irq_save(flags);
+	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
+	local_irq_restore(flags);
+
+	ret = mmc_test_set_blksize(test, blksz);
+	if (ret)
+		return ret;
+
+	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
+		blocks, blksz, write);
+	if (ret)
+		return ret;
+
+	if (write) {
+		int sectors;
+
+		ret = mmc_test_set_blksize(test, 512);
+		if (ret)
+			return ret;
+
+		sectors = (blocks * blksz + 511) / 512;
+		if ((sectors * 512) == (blocks * blksz))
+			sectors++;
+
+		if ((sectors * 512) > BUFFER_SIZE)
+			return -EINVAL;
+
+		memset(test->buffer, 0, sectors * 512);
+
+		for (i = 0; i < sectors; i++) {
+			ret = mmc_test_buffer_transfer(test,
+				test->buffer + i * 512,
+				dev_addr + i, 512, 0);
+			if (ret)
+				return ret;
+		}
+
+		for (i = 0; i < blocks * blksz; i++) {
+			if (test->buffer[i] != (u8)i)
+				return RESULT_FAIL;
+		}
+
+		for (; i < sectors * 512; i++) {
+			if (test->buffer[i] != 0xDF)
+				return RESULT_FAIL;
+		}
+	} else {
+		local_irq_save(flags);
+		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
+		local_irq_restore(flags);
+		for (i = 0; i < blocks * blksz; i++) {
+			if (test->scratch[i] != (u8)i)
+				return RESULT_FAIL;
+		}
+	}
+
+	return 0;
+}
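+
+/*
+ * Verification scheme used above: writes store the incrementing byte
+ * pattern 0x00, 0x01, ... and are read back with single-block reads,
+ * checking both the pattern and that the trailing sentinel bytes (0xDF,
+ * laid down by mmc_test_prepare_write()) were not overwritten; reads
+ * are checked against the same incrementing pattern.
+ */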
+
+/*******************************************************************/
+/*  Tests                                                          */
+/*******************************************************************/
+
+struct mmc_test_case {
+	const char *name;
+
+	int (*prepare)(struct mmc_test_card *);
+	int (*run)(struct mmc_test_card *);
+	int (*cleanup)(struct mmc_test_card *);
+};
+
+static int mmc_test_basic_write(struct mmc_test_card *test)
+{
+	int ret;
+	struct scatterlist sg;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	sg_init_one(&sg, test->buffer, 512);
+
+	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mmc_test_basic_read(struct mmc_test_card *test)
+{
+	int ret;
+	struct scatterlist sg;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	sg_init_one(&sg, test->buffer, 512);
+
+	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mmc_test_verify_write(struct mmc_test_card *test)
+{
+	int ret;
+	struct scatterlist sg;
+
+	sg_init_one(&sg, test->buffer, 512);
+
+	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mmc_test_verify_read(struct mmc_test_card *test)
+{
+	int ret;
+	struct scatterlist sg;
+
+	sg_init_one(&sg, test->buffer, 512);
+
+	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mmc_test_multi_write(struct mmc_test_card *test)
+{
+	int ret;
+	unsigned int size;
+	struct scatterlist sg;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	size = PAGE_SIZE * 2;
+	size = min(size, test->card->host->max_req_size);
+	size = min(size, test->card->host->max_seg_size);
+	size = min(size, test->card->host->max_blk_count * 512);
+
+	if (size < 1024)
+		return RESULT_UNSUP_HOST;
+
+	sg_init_one(&sg, test->buffer, size);
+
+	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mmc_test_multi_read(struct mmc_test_card *test)
+{
+	int ret;
+	unsigned int size;
+	struct scatterlist sg;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	size = PAGE_SIZE * 2;
+	size = min(size, test->card->host->max_req_size);
+	size = min(size, test->card->host->max_seg_size);
+	size = min(size, test->card->host->max_blk_count * 512);
+
+	if (size < 1024)
+		return RESULT_UNSUP_HOST;
+
+	sg_init_one(&sg, test->buffer, size);
+
+	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mmc_test_pow2_write(struct mmc_test_card *test)
+{
+	int ret, i;
+	struct scatterlist sg;
+
+	if (!test->card->csd.write_partial)
+		return RESULT_UNSUP_CARD;
+
+	for (i = 1; i < 512; i <<= 1) {
+		sg_init_one(&sg, test->buffer, i);
+		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_pow2_read(struct mmc_test_card *test)
+{
+	int ret, i;
+	struct scatterlist sg;
+
+	if (!test->card->csd.read_partial)
+		return RESULT_UNSUP_CARD;
+
+	for (i = 1; i < 512; i <<= 1) {
+		sg_init_one(&sg, test->buffer, i);
+		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_weird_write(struct mmc_test_card *test)
+{
+	int ret, i;
+	struct scatterlist sg;
+
+	if (!test->card->csd.write_partial)
+		return RESULT_UNSUP_CARD;
+
+	for (i = 3; i < 512; i += 7) {
+		sg_init_one(&sg, test->buffer, i);
+		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_weird_read(struct mmc_test_card *test)
+{
+	int ret, i;
+	struct scatterlist sg;
+
+	if (!test->card->csd.read_partial)
+		return RESULT_UNSUP_CARD;
+
+	for (i = 3; i < 512; i += 7) {
+		sg_init_one(&sg, test->buffer, i);
+		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_align_write(struct mmc_test_card *test)
+{
+	int ret, i;
+	struct scatterlist sg;
+
+	for (i = 1; i < 4; i++) {
+		sg_init_one(&sg, test->buffer + i, 512);
+		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_align_read(struct mmc_test_card *test)
+{
+	int ret, i;
+	struct scatterlist sg;
+
+	for (i = 1; i < 4; i++) {
+		sg_init_one(&sg, test->buffer + i, 512);
+		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_align_multi_write(struct mmc_test_card *test)
+{
+	int ret, i;
+	unsigned int size;
+	struct scatterlist sg;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	size = PAGE_SIZE * 2;
+	size = min(size, test->card->host->max_req_size);
+	size = min(size, test->card->host->max_seg_size);
+	size = min(size, test->card->host->max_blk_count * 512);
+
+	if (size < 1024)
+		return RESULT_UNSUP_HOST;
+
+	for (i = 1; i < 4; i++) {
+		sg_init_one(&sg, test->buffer + i, size);
+		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_align_multi_read(struct mmc_test_card *test)
+{
+	int ret, i;
+	unsigned int size;
+	struct scatterlist sg;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	size = PAGE_SIZE * 2;
+	size = min(size, test->card->host->max_req_size);
+	size = min(size, test->card->host->max_seg_size);
+	size = min(size, test->card->host->max_blk_count * 512);
+
+	if (size < 1024)
+		return RESULT_UNSUP_HOST;
+
+	for (i = 1; i < 4; i++) {
+		sg_init_one(&sg, test->buffer + i, size);
+		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_xfersize_write(struct mmc_test_card *test)
+{
+	int ret;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	ret = mmc_test_broken_transfer(test, 1, 512, 1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mmc_test_xfersize_read(struct mmc_test_card *test)
+{
+	int ret;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	ret = mmc_test_broken_transfer(test, 1, 512, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
+{
+	int ret;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	ret = mmc_test_broken_transfer(test, 2, 512, 1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
+{
+	int ret;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	ret = mmc_test_broken_transfer(test, 2, 512, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+#ifdef CONFIG_HIGHMEM
+
+static int mmc_test_write_high(struct mmc_test_card *test)
+{
+	int ret;
+	struct scatterlist sg;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, test->highmem, 512, 0);
+
+	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mmc_test_read_high(struct mmc_test_card *test)
+{
+	int ret;
+	struct scatterlist sg;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, test->highmem, 512, 0);
+
+	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mmc_test_multi_write_high(struct mmc_test_card *test)
+{
+	int ret;
+	unsigned int size;
+	struct scatterlist sg;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	size = PAGE_SIZE * 2;
+	size = min(size, test->card->host->max_req_size);
+	size = min(size, test->card->host->max_seg_size);
+	size = min(size, test->card->host->max_blk_count * 512);
+
+	if (size < 1024)
+		return RESULT_UNSUP_HOST;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, test->highmem, size, 0);
+
+	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mmc_test_multi_read_high(struct mmc_test_card *test)
+{
+	int ret;
+	unsigned int size;
+	struct scatterlist sg;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	size = PAGE_SIZE * 2;
+	size = min(size, test->card->host->max_req_size);
+	size = min(size, test->card->host->max_seg_size);
+	size = min(size, test->card->host->max_blk_count * 512);
+
+	if (size < 1024)
+		return RESULT_UNSUP_HOST;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, test->highmem, size, 0);
+
+	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+#else
+
+static int mmc_test_no_highmem(struct mmc_test_card *test)
+{
+	pr_info("%s: Highmem not configured - test skipped\n",
+	       mmc_hostname(test->card->host));
+	return 0;
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+/*
+ * Map sz bytes so that it can be transferred.
+ */
+static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
+			     int max_scatter, int min_sg_len)
+{
+	struct mmc_test_area *t = &test->area;
+	int err;
+
+	t->blocks = sz >> 9;
+
+	if (max_scatter) {
+		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
+						  t->max_segs, t->max_seg_sz,
+						  &t->sg_len);
+	} else {
+		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
+				      t->max_seg_sz, &t->sg_len, min_sg_len);
+	}
+	if (err)
+		pr_info("%s: Failed to map sg list\n",
+		       mmc_hostname(test->card->host));
+	return err;
+}
+
+/*
+ * Transfer bytes mapped by mmc_test_area_map().
+ */
+static int mmc_test_area_transfer(struct mmc_test_card *test,
+				  unsigned int dev_addr, int write)
+{
+	struct mmc_test_area *t = &test->area;
+
+	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
+					t->blocks, 512, write);
+}
+
+/*
+ * Map and transfer bytes for multiple transfers.
+ */
+static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
+				unsigned int dev_addr, int write,
+				int max_scatter, int timed, int count,
+				bool nonblock, int min_sg_len)
+{
+	struct timespec ts1, ts2;
+	int ret = 0;
+	int i;
+	struct mmc_test_area *t = &test->area;
+
+	/*
+	 * In the case of a maximally scattered transfer, the maximum transfer
+	 * size is further limited by using PAGE_SIZE segments.
+	 */
+	if (max_scatter) {
+		unsigned long max_tfr;
+
+		if (t->max_seg_sz >= PAGE_SIZE)
+			max_tfr = t->max_segs * PAGE_SIZE;
+		else
+			max_tfr = t->max_segs * t->max_seg_sz;
+		if (sz > max_tfr)
+			sz = max_tfr;
+	}
+
+	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
+	if (ret)
+		return ret;
+
+	if (timed)
+		getnstimeofday(&ts1);
+	if (nonblock)
+		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
+				 dev_addr, t->blocks, 512, write, count);
+	else
+		for (i = 0; i < count && ret == 0; i++) {
+			ret = mmc_test_area_transfer(test, dev_addr, write);
+			dev_addr += sz >> 9;
+		}
+
+	if (ret)
+		return ret;
+
+	if (timed) {
+		getnstimeofday(&ts2);
+		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
+	}
+
+	return 0;
+}
+
+static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
+			    unsigned int dev_addr, int write, int max_scatter,
+			    int timed)
+{
+	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
+				    timed, 1, false, 0);
+}
+
+/*
+ * Write the test area entirely.
+ */
+static int mmc_test_area_fill(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+
+	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
+}
+
+/*
+ * Erase the test area entirely.
+ */
+static int mmc_test_area_erase(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+
+	if (!mmc_can_erase(test->card))
+		return 0;
+
+	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
+			 MMC_ERASE_ARG);
+}
+
+/*
+ * Cleanup struct mmc_test_area.
+ */
+static int mmc_test_area_cleanup(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+
+	kfree(t->sg);
+	mmc_test_free_mem(t->mem);
+
+	return 0;
+}
+
+/*
+ * Initialize an area for testing large transfers.  The test area is set to the
+ * middle of the card because cards may have different characteristics at the
+ * front (for FAT file system optimization).  Optionally, the area is erased
+ * (if the card supports it) which may improve write performance.  Optionally,
+ * the area is filled with data for subsequent read tests.
+ */
+static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long min_sz = 64 * 1024, sz;
+	int ret;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	/* Make the test area size about 4MiB */
+	sz = (unsigned long)test->card->pref_erase << 9;
+	t->max_sz = sz;
+	while (t->max_sz < 4 * 1024 * 1024)
+		t->max_sz += sz;
+	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
+		t->max_sz -= sz;
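+	/*
+	 * Worked example: with pref_erase = 1024 sectors (512 KiB),
+	 * max_sz steps up 512 KiB at a time until it reaches 4 MiB;
+	 * with pref_erase = 8192 sectors (4 MiB) the first loop exits
+	 * immediately and max_sz stays at 4 MiB.
+	 */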
+
+	t->max_segs = test->card->host->max_segs;
+	t->max_seg_sz = test->card->host->max_seg_size;
+	t->max_seg_sz -= t->max_seg_sz % 512;
+
+	t->max_tfr = t->max_sz;
+	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
+		t->max_tfr = test->card->host->max_blk_count << 9;
+	if (t->max_tfr > test->card->host->max_req_size)
+		t->max_tfr = test->card->host->max_req_size;
+	if (t->max_tfr / t->max_seg_sz > t->max_segs)
+		t->max_tfr = t->max_segs * t->max_seg_sz;
+
+	/*
+	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
+	 * because the same memory can be mapped into the scatterlist more than
+	 * once.  Also, take into account the limits imposed on scatterlist
+	 * segments by the host driver.
+	 */
+	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
+				    t->max_seg_sz);
+	if (!t->mem)
+		return -ENOMEM;
+
+	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
+	if (!t->sg) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	t->dev_addr = mmc_test_capacity(test->card) / 2;
+	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
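+	/*
+	 * E.g. on a card with 2097152 sectors (1 GiB) and max_sz = 4 MiB
+	 * (8192 sectors), the test area starts at sector 1048576, which
+	 * is already a multiple of 8192, i.e. exactly mid-card.
+	 */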
+
+	if (erase) {
+		ret = mmc_test_area_erase(test);
+		if (ret)
+			goto out_free;
+	}
+
+	if (fill) {
+		ret = mmc_test_area_fill(test);
+		if (ret)
+			goto out_free;
+	}
+
+	return 0;
+
+out_free:
+	mmc_test_area_cleanup(test);
+	return ret;
+}
+
+/*
+ * Prepare for large transfers.  Do not erase the test area.
+ */
+static int mmc_test_area_prepare(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 0, 0);
+}
+
+/*
+ * Prepare for large transfers.  Do erase the test area.
+ */
+static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 1, 0);
+}
+
+/*
+ * Prepare for large transfers.  Erase and fill the test area.
+ */
+static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 1, 1);
+}
+
+/*
+ * Test best-case performance.  Best-case performance is expected from
+ * a single large transfer.
+ *
+ * An additional option (max_scatter) allows the measurement of the same
+ * transfer but with no contiguous pages in the scatter list.  This tests
+ * the efficiency of DMA to handle scattered pages.
+ */
+static int mmc_test_best_performance(struct mmc_test_card *test, int write,
+				     int max_scatter)
+{
+	struct mmc_test_area *t = &test->area;
+
+	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
+				max_scatter, 1);
+}
+
+/*
+ * Best-case read performance.
+ */
+static int mmc_test_best_read_performance(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 0, 0);
+}
+
+/*
+ * Best-case write performance.
+ */
+static int mmc_test_best_write_performance(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 1, 0);
+}
+
+/*
+ * Best-case read performance into scattered pages.
+ */
+static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 0, 1);
+}
+
+/*
+ * Best-case write performance from scattered pages.
+ */
+static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 1, 1);
+}
+
+/*
+ * Single read performance by transfer size.
+ */
+static int mmc_test_profile_read_perf(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long sz;
+	unsigned int dev_addr;
+	int ret;
+
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+		dev_addr = t->dev_addr + (sz >> 9);
+		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+		if (ret)
+			return ret;
+	}
+	sz = t->max_tfr;
+	dev_addr = t->dev_addr;
+	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+}
+
+/*
+ * Single write performance by transfer size.
+ */
+static int mmc_test_profile_write_perf(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long sz;
+	unsigned int dev_addr;
+	int ret;
+
+	ret = mmc_test_area_erase(test);
+	if (ret)
+		return ret;
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+		dev_addr = t->dev_addr + (sz >> 9);
+		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+		if (ret)
+			return ret;
+	}
+	ret = mmc_test_area_erase(test);
+	if (ret)
+		return ret;
+	sz = t->max_tfr;
+	dev_addr = t->dev_addr;
+	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+}
+
+/*
+ * Single trim performance by transfer size.
+ */
+static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long sz;
+	unsigned int dev_addr;
+	struct timespec ts1, ts2;
+	int ret;
+
+	if (!mmc_can_trim(test->card))
+		return RESULT_UNSUP_CARD;
+
+	if (!mmc_can_erase(test->card))
+		return RESULT_UNSUP_HOST;
+
+	for (sz = 512; sz < t->max_sz; sz <<= 1) {
+		dev_addr = t->dev_addr + (sz >> 9);
+		getnstimeofday(&ts1);
+		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+		if (ret)
+			return ret;
+		getnstimeofday(&ts2);
+		mmc_test_print_rate(test, sz, &ts1, &ts2);
+	}
+	dev_addr = t->dev_addr;
+	getnstimeofday(&ts1);
+	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+	if (ret)
+		return ret;
+	getnstimeofday(&ts2);
+	mmc_test_print_rate(test, sz, &ts1, &ts2);
+	return 0;
+}
+
+static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned int dev_addr, i, cnt;
+	struct timespec ts1, ts2;
+	int ret;
+
+	cnt = t->max_sz / sz;
+	dev_addr = t->dev_addr;
+	getnstimeofday(&ts1);
+	for (i = 0; i < cnt; i++) {
+		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
+		if (ret)
+			return ret;
+		dev_addr += (sz >> 9);
+	}
+	getnstimeofday(&ts2);
+	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	return 0;
+}
+
+/*
+ * Consecutive read performance by transfer size.
+ */
+static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long sz;
+	int ret;
+
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+		ret = mmc_test_seq_read_perf(test, sz);
+		if (ret)
+			return ret;
+	}
+	sz = t->max_tfr;
+	return mmc_test_seq_read_perf(test, sz);
+}
+
+static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned int dev_addr, i, cnt;
+	struct timespec ts1, ts2;
+	int ret;
+
+	ret = mmc_test_area_erase(test);
+	if (ret)
+		return ret;
+	cnt = t->max_sz / sz;
+	dev_addr = t->dev_addr;
+	getnstimeofday(&ts1);
+	for (i = 0; i < cnt; i++) {
+		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
+		if (ret)
+			return ret;
+		dev_addr += (sz >> 9);
+	}
+	getnstimeofday(&ts2);
+	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	return 0;
+}
+
+/*
+ * Consecutive write performance by transfer size.
+ */
+static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long sz;
+	int ret;
+
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+		ret = mmc_test_seq_write_perf(test, sz);
+		if (ret)
+			return ret;
+	}
+	sz = t->max_tfr;
+	return mmc_test_seq_write_perf(test, sz);
+}
+
+/*
+ * Consecutive trim performance by transfer size.
+ */
+static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long sz;
+	unsigned int dev_addr, i, cnt;
+	struct timespec ts1, ts2;
+	int ret;
+
+	if (!mmc_can_trim(test->card))
+		return RESULT_UNSUP_CARD;
+
+	if (!mmc_can_erase(test->card))
+		return RESULT_UNSUP_HOST;
+
+	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
+		ret = mmc_test_area_erase(test);
+		if (ret)
+			return ret;
+		ret = mmc_test_area_fill(test);
+		if (ret)
+			return ret;
+		cnt = t->max_sz / sz;
+		dev_addr = t->dev_addr;
+		getnstimeofday(&ts1);
+		for (i = 0; i < cnt; i++) {
+			ret = mmc_erase(test->card, dev_addr, sz >> 9,
+					MMC_TRIM_ARG);
+			if (ret)
+				return ret;
+			dev_addr += (sz >> 9);
+		}
+		getnstimeofday(&ts2);
+		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	}
+	return 0;
+}
+
+static unsigned int rnd_next = 1;
+
+static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
+{
+	uint64_t r;
+
+	rnd_next = rnd_next * 1103515245 + 12345;
+	r = (rnd_next >> 16) & 0x7fff;
+	return (r * rnd_cnt) >> 15;
+}
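+
+/*
+ * This is the classic ANSI C rand() linear congruential generator
+ * (multiplier 1103515245, increment 12345); the top 15 bits of the
+ * state are scaled into [0, rnd_cnt). It is deterministic on purpose:
+ * restoring rnd_next replays the exact same I/O pattern, which
+ * mmc_test_random_perf() relies on when it runs each write test twice.
+ */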
+
+static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
+			     unsigned long sz)
+{
+	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
+	unsigned int ssz;
+	struct timespec ts1, ts2, ts;
+	int ret;
+
+	ssz = sz >> 9;
+
+	rnd_addr = mmc_test_capacity(test->card) / 4;
+	range1 = rnd_addr / test->card->pref_erase;
+	range2 = range1 / ssz;
+
+	getnstimeofday(&ts1);
+	for (cnt = 0; cnt < UINT_MAX; cnt++) {
+		getnstimeofday(&ts2);
+		ts = timespec_sub(ts2, ts1);
+		if (ts.tv_sec >= 10)
+			break;
+		ea = mmc_test_rnd_num(range1);
+		if (ea == last_ea)
+			ea -= 1;
+		last_ea = ea;
+		dev_addr = rnd_addr + test->card->pref_erase * ea +
+			   ssz * mmc_test_rnd_num(range2);
+		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
+		if (ret)
+			return ret;
+	}
+	if (print)
+		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	return 0;
+}
+
+static int mmc_test_random_perf(struct mmc_test_card *test, int write)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned int next;
+	unsigned long sz;
+	int ret;
+
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+		/*
+		 * When writing, try to get more consistent results by running
+		 * the test twice with exactly the same I/O but outputting the
+		 * results only for the 2nd run.
+		 */
+		if (write) {
+			next = rnd_next;
+			ret = mmc_test_rnd_perf(test, write, 0, sz);
+			if (ret)
+				return ret;
+			rnd_next = next;
+		}
+		ret = mmc_test_rnd_perf(test, write, 1, sz);
+		if (ret)
+			return ret;
+	}
+	sz = t->max_tfr;
+	if (write) {
+		next = rnd_next;
+		ret = mmc_test_rnd_perf(test, write, 0, sz);
+		if (ret)
+			return ret;
+		rnd_next = next;
+	}
+	return mmc_test_rnd_perf(test, write, 1, sz);
+}
+
+/*
+ * Random read performance by transfer size.
+ */
+static int mmc_test_random_read_perf(struct mmc_test_card *test)
+{
+	return mmc_test_random_perf(test, 0);
+}
+
+/*
+ * Random write performance by transfer size.
+ */
+static int mmc_test_random_write_perf(struct mmc_test_card *test)
+{
+	return mmc_test_random_perf(test, 1);
+}
+
+static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
+			     unsigned int tot_sz, int max_scatter)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned int dev_addr, i, cnt, sz, ssz;
+	struct timespec ts1, ts2;
+	int ret;
+
+	sz = t->max_tfr;
+
+	/*
+	 * In the case of a maximally scattered transfer, the maximum transfer
+	 * size is further limited by using PAGE_SIZE segments.
+	 */
+	if (max_scatter) {
+		unsigned long max_tfr;
+
+		if (t->max_seg_sz >= PAGE_SIZE)
+			max_tfr = t->max_segs * PAGE_SIZE;
+		else
+			max_tfr = t->max_segs * t->max_seg_sz;
+		if (sz > max_tfr)
+			sz = max_tfr;
+	}
+
+	ssz = sz >> 9;
+	dev_addr = mmc_test_capacity(test->card) / 4;
+	if (tot_sz > dev_addr << 9)
+		tot_sz = dev_addr << 9;
+	cnt = tot_sz / sz;
+	dev_addr &= 0xffff0000; /* Round to a 32MiB (0x10000-sector) boundary */
+
+	getnstimeofday(&ts1);
+	for (i = 0; i < cnt; i++) {
+		ret = mmc_test_area_io(test, sz, dev_addr, write,
+				       max_scatter, 0);
+		if (ret)
+			return ret;
+		dev_addr += ssz;
+	}
+	getnstimeofday(&ts2);
+
+	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+
+	return 0;
+}
+
+static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
+{
+	int ret, i;
+
+	for (i = 0; i < 10; i++) {
+		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
+		if (ret)
+			return ret;
+	}
+	for (i = 0; i < 5; i++) {
+		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
+		if (ret)
+			return ret;
+	}
+	for (i = 0; i < 3; i++) {
+		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * Large sequential read performance.
+ */
+static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
+{
+	return mmc_test_large_seq_perf(test, 0);
+}
+
+/*
+ * Large sequential write performance.
+ */
+static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
+{
+	return mmc_test_large_seq_perf(test, 1);
+}
+
+static int mmc_test_rw_multiple(struct mmc_test_card *test,
+				struct mmc_test_multiple_rw *tdata,
+				unsigned int reqsize, unsigned int size,
+				int min_sg_len)
+{
+	unsigned int dev_addr;
+	struct mmc_test_area *t = &test->area;
+	int ret = 0;
+
+	/* Set up test area */
+	if (size > mmc_test_capacity(test->card) / 2 * 512)
+		size = mmc_test_capacity(test->card) / 2 * 512;
+	if (reqsize > t->max_tfr)
+		reqsize = t->max_tfr;
+	dev_addr = mmc_test_capacity(test->card) / 4;
+	if ((dev_addr & 0xffff0000))
+		dev_addr &= 0xffff0000; /* Round to 32MiB boundary */
+	else
+		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
+	if (!dev_addr)
+		goto err;
+
+	if (reqsize > size)
+		return 0;
+
+	/* prepare test area */
+	if (mmc_can_erase(test->card) &&
+	    tdata->prepare & MMC_TEST_PREP_ERASE) {
+		ret = mmc_erase(test->card, dev_addr,
+				size / 512, MMC_SECURE_ERASE_ARG);
+		if (ret)
+			ret = mmc_erase(test->card, dev_addr,
+					size / 512, MMC_ERASE_ARG);
+		if (ret)
+			goto err;
+	}
+
+	/* Run test */
+	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
+				   tdata->do_write, 0, 1, size / reqsize,
+				   tdata->do_nonblock_req, min_sg_len);
+	if (ret)
+		goto err;
+
+	return ret;
+ err:
+	pr_info("[%s] error\n", __func__);
+	return ret;
+}
+
+static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
+				     struct mmc_test_multiple_rw *rw)
+{
+	int ret = 0;
+	int i;
+	void *pre_req = test->card->host->ops->pre_req;
+	void *post_req = test->card->host->ops->post_req;
+
+	if (rw->do_nonblock_req &&
+	    ((!pre_req && post_req) || (pre_req && !post_req))) {
+		pr_info("error: only one of pre/post is defined\n");
+		return -EINVAL;
+	}
+
+	for (i = 0 ; i < rw->len && ret == 0; i++) {
+		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
+				       struct mmc_test_multiple_rw *rw)
+{
+	int ret = 0;
+	int i;
+
+	for (i = 0 ; i < rw->len && ret == 0; i++) {
+		ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
+					   rw->sg_len[i]);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+/*
+ * Multiple blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
+{
+	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+	struct mmc_test_multiple_rw test_data = {
+		.bs = bs,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(bs),
+		.do_write = true,
+		.do_nonblock_req = false,
+		.prepare = MMC_TEST_PREP_ERASE,
+	};
+
+	return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
+{
+	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+	struct mmc_test_multiple_rw test_data = {
+		.bs = bs,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(bs),
+		.do_write = true,
+		.do_nonblock_req = true,
+		.prepare = MMC_TEST_PREP_ERASE,
+	};
+
+	return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
+{
+	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+	struct mmc_test_multiple_rw test_data = {
+		.bs = bs,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(bs),
+		.do_write = false,
+		.do_nonblock_req = false,
+		.prepare = MMC_TEST_PREP_NONE,
+	};
+
+	return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
+{
+	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+	struct mmc_test_multiple_rw test_data = {
+		.bs = bs,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(bs),
+		.do_write = false,
+		.do_nonblock_req = true,
+		.prepare = MMC_TEST_PREP_NONE,
+	};
+
+	return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
+{
+	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+				 1 << 7, 1 << 8, 1 << 9};
+	struct mmc_test_multiple_rw test_data = {
+		.sg_len = sg_len,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(sg_len),
+		.do_write = true,
+		.do_nonblock_req = false,
+		.prepare = MMC_TEST_PREP_ERASE,
+	};
+
+	return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
+{
+	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+				 1 << 7, 1 << 8, 1 << 9};
+	struct mmc_test_multiple_rw test_data = {
+		.sg_len = sg_len,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(sg_len),
+		.do_write = true,
+		.do_nonblock_req = true,
+		.prepare = MMC_TEST_PREP_ERASE,
+	};
+
+	return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
+{
+	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+				 1 << 7, 1 << 8, 1 << 9};
+	struct mmc_test_multiple_rw test_data = {
+		.sg_len = sg_len,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(sg_len),
+		.do_write = false,
+		.do_nonblock_req = false,
+		.prepare = MMC_TEST_PREP_NONE,
+	};
+
+	return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
+{
+	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+				 1 << 7, 1 << 8, 1 << 9};
+	struct mmc_test_multiple_rw test_data = {
+		.sg_len = sg_len,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(sg_len),
+		.do_write = false,
+		.do_nonblock_req = true,
+		.prepare = MMC_TEST_PREP_NONE,
+	};
+
+	return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * eMMC hardware reset.
+ */
+static int mmc_test_hw_reset(struct mmc_test_card *test)
+{
+	struct mmc_card *card = test->card;
+	struct mmc_host *host = card->host;
+	int err;
+
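+	/*
+	 * Translate the reset check into a test verdict: 0 means the reset
+	 * worked, -ENOSYS means it did not take effect, -EOPNOTSUPP is
+	 * refined below into card vs. host lack of support, and any other
+	 * error is passed straight through.
+	 */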
+	err = mmc_hw_reset_check(host);
+	if (!err)
+		return RESULT_OK;
+
+	if (err == -ENOSYS)
+		return RESULT_FAIL;
+
+	if (err != -EOPNOTSUPP)
+		return err;
+
+	if (!mmc_can_reset(card))
+		return RESULT_UNSUP_CARD;
+
+	return RESULT_UNSUP_HOST;
+}
+
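+/*
+ * The position of an entry in this table determines the 1-based test
+ * number shown by the "testlist" debugfs file and accepted by writes to
+ * the "test" file, so reordering entries renumbers the tests.
+ */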
+static const struct mmc_test_case mmc_test_cases[] = {
+	{
+		.name = "Basic write (no data verification)",
+		.run = mmc_test_basic_write,
+	},
+
+	{
+		.name = "Basic read (no data verification)",
+		.run = mmc_test_basic_read,
+	},
+
+	{
+		.name = "Basic write (with data verification)",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_verify_write,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Basic read (with data verification)",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_verify_read,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Multi-block write",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_multi_write,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Multi-block read",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_multi_read,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Power of two block writes",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_pow2_write,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Power of two block reads",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_pow2_read,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Weird sized block writes",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_weird_write,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Weird sized block reads",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_weird_read,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Badly aligned write",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_align_write,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Badly aligned read",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_align_read,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Badly aligned multi-block write",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_align_multi_write,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Badly aligned multi-block read",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_align_multi_read,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Correct xfer_size at write (start failure)",
+		.run = mmc_test_xfersize_write,
+	},
+
+	{
+		.name = "Correct xfer_size at read (start failure)",
+		.run = mmc_test_xfersize_read,
+	},
+
+	{
+		.name = "Correct xfer_size at write (midway failure)",
+		.run = mmc_test_multi_xfersize_write,
+	},
+
+	{
+		.name = "Correct xfer_size at read (midway failure)",
+		.run = mmc_test_multi_xfersize_read,
+	},
+
+#ifdef CONFIG_HIGHMEM
+
+	{
+		.name = "Highmem write",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_write_high,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Highmem read",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_read_high,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Multi-block highmem write",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_multi_write_high,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Multi-block highmem read",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_multi_read_high,
+		.cleanup = mmc_test_cleanup,
+	},
+
+#else
+
+	{
+		.name = "Highmem write",
+		.run = mmc_test_no_highmem,
+	},
+
+	{
+		.name = "Highmem read",
+		.run = mmc_test_no_highmem,
+	},
+
+	{
+		.name = "Multi-block highmem write",
+		.run = mmc_test_no_highmem,
+	},
+
+	{
+		.name = "Multi-block highmem read",
+		.run = mmc_test_no_highmem,
+	},
+
+#endif /* CONFIG_HIGHMEM */
+
+	{
+		.name = "Best-case read performance",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_best_read_performance,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Best-case write performance",
+		.prepare = mmc_test_area_prepare_erase,
+		.run = mmc_test_best_write_performance,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Best-case read performance into scattered pages",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_best_read_perf_max_scatter,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Best-case write performance from scattered pages",
+		.prepare = mmc_test_area_prepare_erase,
+		.run = mmc_test_best_write_perf_max_scatter,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Single read performance by transfer size",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_profile_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Single write performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Single trim performance by transfer size",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_profile_trim_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Consecutive read performance by transfer size",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_profile_seq_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Consecutive write performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_seq_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Consecutive trim performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_seq_trim_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Random read performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_random_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Random write performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_random_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Large sequential read into scattered pages",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_large_seq_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Large sequential write from scattered pages",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_large_seq_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Write performance with blocking req 4k to 4MB",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_mult_write_blocking_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Write performance with non-blocking req 4k to 4MB",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_mult_write_nonblock_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Read performance with blocking req 4k to 4MB",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_mult_read_blocking_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Read performance with non-blocking req 4k to 4MB",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_mult_read_nonblock_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Write performance blocking req 1 to 512 sg elems",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_sglen_wr_blocking_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Write performance non-blocking req 1 to 512 sg elems",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_sglen_wr_nonblock_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Read performance blocking req 1 to 512 sg elems",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_sglen_r_blocking_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Read performance non-blocking req 1 to 512 sg elems",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_sglen_r_nonblock_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "eMMC hardware reset",
+		.run = mmc_test_hw_reset,
+	},
+};
+
+static DEFINE_MUTEX(mmc_test_lock);
+
+static LIST_HEAD(mmc_test_result);
+
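+/*
+ * Run the test cases: @testcase selects a single case by its 1-based
+ * number as listed in the "testlist" debugfs file, or 0 to run them all.
+ */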
+static void mmc_test_run(struct mmc_test_card *test, int testcase)
+{
+	int i, ret;
+
+	pr_info("%s: Starting tests of card %s...\n",
+		mmc_hostname(test->card->host), mmc_card_id(test->card));
+
+	mmc_claim_host(test->card->host);
+
+	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
+		struct mmc_test_general_result *gr;
+
+		if (testcase && ((i + 1) != testcase))
+			continue;
+
+		pr_info("%s: Test case %d. %s...\n",
+			mmc_hostname(test->card->host), i + 1,
+			mmc_test_cases[i].name);
+
+		if (mmc_test_cases[i].prepare) {
+			ret = mmc_test_cases[i].prepare(test);
+			if (ret) {
+				pr_info("%s: Result: Prepare stage failed! (%d)\n",
+					mmc_hostname(test->card->host), ret);
+				continue;
+			}
+		}
+
+		gr = kzalloc(sizeof(struct mmc_test_general_result),
+			GFP_KERNEL);
+		if (gr) {
+			INIT_LIST_HEAD(&gr->tr_lst);
+
+			/* Fill in the data we already know */
+			gr->card = test->card;
+			gr->testcase = i;
+
+			/* Append container to global one */
+			list_add_tail(&gr->link, &mmc_test_result);
+
+			/*
+			 * Save the pointer to created container in our private
+			 * structure.
+			 */
+			test->gr = gr;
+		}
+
+		ret = mmc_test_cases[i].run(test);
+		switch (ret) {
+		case RESULT_OK:
+			pr_info("%s: Result: OK\n",
+				mmc_hostname(test->card->host));
+			break;
+		case RESULT_FAIL:
+			pr_info("%s: Result: FAILED\n",
+				mmc_hostname(test->card->host));
+			break;
+		case RESULT_UNSUP_HOST:
+			pr_info("%s: Result: UNSUPPORTED (by host)\n",
+				mmc_hostname(test->card->host));
+			break;
+		case RESULT_UNSUP_CARD:
+			pr_info("%s: Result: UNSUPPORTED (by card)\n",
+				mmc_hostname(test->card->host));
+			break;
+		default:
+			pr_info("%s: Result: ERROR (%d)\n",
+				mmc_hostname(test->card->host), ret);
+		}
+
+		/* Save the result */
+		if (gr)
+			gr->result = ret;
+
+		if (mmc_test_cases[i].cleanup) {
+			ret = mmc_test_cases[i].cleanup(test);
+			if (ret) {
+				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
+					mmc_hostname(test->card->host), ret);
+			}
+		}
+	}
+
+	mmc_release_host(test->card->host);
+
+	pr_info("%s: Tests completed.\n",
+		mmc_hostname(test->card->host));
+}
+
+static void mmc_test_free_result(struct mmc_card *card)
+{
+	struct mmc_test_general_result *gr, *grs;
+
+	mutex_lock(&mmc_test_lock);
+
+	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
+		struct mmc_test_transfer_result *tr, *trs;
+
+		if (card && gr->card != card)
+			continue;
+
+		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
+			list_del(&tr->link);
+			kfree(tr);
+		}
+
+		list_del(&gr->link);
+		kfree(gr);
+	}
+
+	mutex_unlock(&mmc_test_lock);
+}
+
+static LIST_HEAD(mmc_test_file_test);
+
+static int mtf_test_show(struct seq_file *sf, void *data)
+{
+	struct mmc_card *card = (struct mmc_card *)sf->private;
+	struct mmc_test_general_result *gr;
+
+	mutex_lock(&mmc_test_lock);
+
+	list_for_each_entry(gr, &mmc_test_result, link) {
+		struct mmc_test_transfer_result *tr;
+
+		if (gr->card != card)
+			continue;
+
+		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
+
+		list_for_each_entry(tr, &gr->tr_lst, link) {
+			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
+				tr->count, tr->sectors,
+				(unsigned long)tr->ts.tv_sec,
+				(unsigned long)tr->ts.tv_nsec,
+				tr->rate, tr->iops / 100, tr->iops % 100);
+		}
+	}
+
+	mutex_unlock(&mmc_test_lock);
+
+	return 0;
+}
+
+static int mtf_test_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mtf_test_show, inode->i_private);
+}
+
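+/*
+ * A test run is triggered by writing the desired test number to the
+ * "test" debugfs file, e.g. (the exact path depends on the card):
+ *
+ *	echo 1 > /sys/kernel/debug/mmc0/mmc0:0001/test
+ *
+ * Writing 0 runs every test case in sequence.
+ */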
+static ssize_t mtf_test_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct seq_file *sf = (struct seq_file *)file->private_data;
+	struct mmc_card *card = (struct mmc_card *)sf->private;
+	struct mmc_test_card *test;
+	char lbuf[12];
+	long testcase;
+
+	if (count >= sizeof(lbuf))
+		return -EINVAL;
+
+	if (copy_from_user(lbuf, buf, count))
+		return -EFAULT;
+	lbuf[count] = '\0';
+
+	if (strict_strtol(lbuf, 10, &testcase))
+		return -EINVAL;
+
+	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
+	if (!test)
+		return -ENOMEM;
+
+	/*
+	 * Remove all results associated with the given card so that only
+	 * the data from the most recent run is kept.
+	 */
+	mmc_test_free_result(card);
+
+	test->card = card;
+
+	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
+#ifdef CONFIG_HIGHMEM
+	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+	if (test->buffer && test->highmem) {
+#else
+	if (test->buffer) {
+#endif
+		mutex_lock(&mmc_test_lock);
+		mmc_test_run(test, testcase);
+		mutex_unlock(&mmc_test_lock);
+	}
+
+#ifdef CONFIG_HIGHMEM
+	__free_pages(test->highmem, BUFFER_ORDER);
+#endif
+	kfree(test->buffer);
+	kfree(test);
+
+	return count;
+}
+
+static const struct file_operations mmc_test_fops_test = {
+	.open		= mtf_test_open,
+	.read		= seq_read,
+	.write		= mtf_test_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int mtf_testlist_show(struct seq_file *sf, void *data)
+{
+	int i;
+
+	mutex_lock(&mmc_test_lock);
+
+	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
+		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
+
+	mutex_unlock(&mmc_test_lock);
+
+	return 0;
+}
+
+static int mtf_testlist_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mtf_testlist_show, inode->i_private);
+}
+
+static const struct file_operations mmc_test_fops_testlist = {
+	.open		= mtf_testlist_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void mmc_test_free_dbgfs_file(struct mmc_card *card)
+{
+	struct mmc_test_dbgfs_file *df, *dfs;
+
+	mutex_lock(&mmc_test_lock);
+
+	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
+		if (card && df->card != card)
+			continue;
+		debugfs_remove(df->file);
+		list_del(&df->link);
+		kfree(df);
+	}
+
+	mutex_unlock(&mmc_test_lock);
+}
+
+static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
+	const char *name, umode_t mode, const struct file_operations *fops)
+{
+	struct dentry *file = NULL;
+	struct mmc_test_dbgfs_file *df;
+
+	if (card->debugfs_root)
+		file = debugfs_create_file(name, mode, card->debugfs_root,
+			card, fops);
+
+	if (IS_ERR_OR_NULL(file)) {
+		dev_err(&card->dev,
+			"Can't create %s. Perhaps debugfs is disabled.\n",
+			name);
+		return -ENODEV;
+	}
+
+	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
+	if (!df) {
+		debugfs_remove(file);
+		dev_err(&card->dev,
+			"Can't allocate memory for internal usage.\n");
+		return -ENOMEM;
+	}
+
+	df->card = card;
+	df->file = file;
+
+	list_add(&df->link, &mmc_test_file_test);
+	return 0;
+}
+
+static int mmc_test_register_dbgfs_file(struct mmc_card *card)
+{
+	int ret;
+
+	mutex_lock(&mmc_test_lock);
+
+	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
+		&mmc_test_fops_test);
+	if (ret)
+		goto err;
+
+	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
+		&mmc_test_fops_testlist);
+	if (ret)
+		goto err;
+
+err:
+	mutex_unlock(&mmc_test_lock);
+
+	return ret;
+}
+
+static int mmc_test_probe(struct mmc_card *card)
+{
+	int ret;
+
+	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
+		return -ENODEV;
+
+	ret = mmc_test_register_dbgfs_file(card);
+	if (ret)
+		return ret;
+
+	dev_info(&card->dev, "Card claimed for testing.\n");
+
+	return 0;
+}
+
+static void mmc_test_remove(struct mmc_card *card)
+{
+	mmc_test_free_result(card);
+	mmc_test_free_dbgfs_file(card);
+}
+
+static struct mmc_driver mmc_driver = {
+	.drv		= {
+		.name	= "mmc_test",
+	},
+	.probe		= mmc_test_probe,
+	.remove		= mmc_test_remove,
+};
+
+static int __init mmc_test_init(void)
+{
+	return mmc_register_driver(&mmc_driver);
+}
+
+static void __exit mmc_test_exit(void)
+{
+	/* Clear stalled data if card is still plugged */
+	mmc_test_free_result(NULL);
+	mmc_test_free_dbgfs_file(NULL);
+
+	mmc_unregister_driver(&mmc_driver);
+}
+
+module_init(mmc_test_init);
+module_exit(mmc_test_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
+MODULE_AUTHOR("Pierre Ossman");
diff --git a/ap/os/linux/linux-3.4.x/drivers/mmc/card/queue.c b/ap/os/linux/linux-3.4.x/drivers/mmc/card/queue.c
new file mode 100644
index 0000000..996f8e3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/mmc/card/queue.c
@@ -0,0 +1,438 @@
+/*
+ *  linux/drivers/mmc/card/queue.c
+ *
+ *  Copyright (C) 2003 Russell King, All Rights Reserved.
+ *  Copyright 2006-2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/scatterlist.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include "queue.h"
+
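+/* Size of the bounce buffer used for hosts that cannot do scatter-gather */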
+#define MMC_QUEUE_BOUNCESZ	65536
+
+#define MMC_QUEUE_SUSPENDED	(1 << 0)
+
+/*
+ * Prepare an MMC request. This just filters out odd stuff.
+ */
+static int mmc_prep_request(struct request_queue *q, struct request *req)
+{
+	struct mmc_queue *mq = q->queuedata;
+
+	/*
+	 * We only like normal block requests and discards.
+	 */
+	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
+		blk_dump_rq_flags(req, "MMC bad request");
+		return BLKPREP_KILL;
+	}
+
+	if (mq && mmc_card_removed(mq->card))
+		return BLKPREP_KILL;
+
+	req->cmd_flags |= REQ_DONTPREP;
+
+	return BLKPREP_OK;
+}
+
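+/*
+ * Per-queue worker thread: fetches requests from the block layer and
+ * hands them to issue_fn(), sleeping when both request slots are idle.
+ * The mqrq_cur/mqrq_prev swap at the bottom of the loop lets a new
+ * request be prepared while the previous one is still in flight.
+ */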
+static int mmc_queue_thread(void *d)
+{
+	struct mmc_queue *mq = d;
+	struct request_queue *q = mq->queue;
+
+	current->flags |= PF_MEMALLOC;
+
+	down(&mq->thread_sem);
+	do {
+		struct request *req = NULL;
+		struct mmc_queue_req *tmp;
+
+		spin_lock_irq(q->queue_lock);
+		set_current_state(TASK_INTERRUPTIBLE);
+		req = blk_fetch_request(q);
+		mq->mqrq_cur->req = req;
+		spin_unlock_irq(q->queue_lock);
+
+		if (req || mq->mqrq_prev->req) {
+			set_current_state(TASK_RUNNING);
+			mq->issue_fn(mq, req);
+		} else {
+			if (kthread_should_stop()) {
+				set_current_state(TASK_RUNNING);
+				break;
+			}
+			up(&mq->thread_sem);
+			schedule();
+			down(&mq->thread_sem);
+		}
+
+		/* Current request becomes previous request and vice versa. */
+		mq->mqrq_prev->brq.mrq.data = NULL;
+		mq->mqrq_prev->req = NULL;
+		tmp = mq->mqrq_prev;
+		mq->mqrq_prev = mq->mqrq_cur;
+		mq->mqrq_cur = tmp;
+	} while (1);
+	up(&mq->thread_sem);
+
+	return 0;
+}
+
+/*
+ * Generic MMC request handler.  This is called for any queue on a
+ * particular host.  When the host is not busy, we look for a request
+ * on any queue on this host, and attempt to issue it.  This may
+ * not be the queue we were asked to process.
+ */
+static void mmc_request(struct request_queue *q)
+{
+	struct mmc_queue *mq = q->queuedata;
+	struct request *req;
+
+	if (!mq) {
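+		/*
+		 * The queue is being torn down (queuedata was cleared in
+		 * mmc_cleanup_queue()), so fail any remaining requests.
+		 */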
+		while ((req = blk_fetch_request(q)) != NULL) {
+			req->cmd_flags |= REQ_QUIET;
+			__blk_end_request_all(req, -EIO);
+		}
+		return;
+	}
+
+	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
+		wake_up_process(mq->thread);
+}
+
+static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+{
+	struct scatterlist *sg;
+
+	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
+	if (!sg)
+		*err = -ENOMEM;
+	else {
+		*err = 0;
+		sg_init_table(sg, sg_len);
+	}
+
+	return sg;
+}
+
+static void mmc_queue_setup_discard(struct request_queue *q,
+				    struct mmc_card *card)
+{
+	unsigned max_discard;
+
+	max_discard = mmc_calc_max_discard(card);
+	if (!max_discard)
+		return;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	q->limits.max_discard_sectors = max_discard;
+	if (card->erased_byte == 0 && !mmc_can_discard(card))
+		q->limits.discard_zeroes_data = 1;
+	q->limits.discard_granularity = card->pref_erase << 9;
+	/* granularity must not be greater than the maximum discard size */
+	if (card->pref_erase > max_discard)
+		q->limits.discard_granularity = 0;
+	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
+		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
+}
+
+/**
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ * @lock: queue lock
+ * @subname: partition subname
+ *
+ * Initialise an MMC card request queue.
+ */
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+		   spinlock_t *lock, const char *subname)
+{
+	struct mmc_host *host = card->host;
+	u64 limit = BLK_BOUNCE_HIGH;
+	int ret;
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+
+	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+		limit = *mmc_dev(host)->dma_mask;
+
+	mq->card = card;
+	mq->queue = blk_init_queue(mmc_request, lock);
+	if (!mq->queue)
+		return -ENOMEM;
+
+	/* Zero the request slots themselves, not the pointers set just below */
+	memset(mqrq_cur, 0, sizeof(*mqrq_cur));
+	memset(mqrq_prev, 0, sizeof(*mqrq_prev));
+	mq->mqrq_cur = mqrq_cur;
+	mq->mqrq_prev = mqrq_prev;
+	mq->queue->queuedata = mq;
+
+	blk_queue_prep_rq(mq->queue, mmc_prep_request);
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+	if (mmc_can_erase(card))
+		mmc_queue_setup_discard(mq->queue, card);
+
+#ifdef CONFIG_MMC_BLOCK_BOUNCE
+	if (host->max_segs == 1) {
+		unsigned int bouncesz;
+
+		bouncesz = MMC_QUEUE_BOUNCESZ;
+
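+		/* Clamp the buffer to the host's transfer limits. */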
+		if (bouncesz > host->max_req_size)
+			bouncesz = host->max_req_size;
+		if (bouncesz > host->max_seg_size)
+			bouncesz = host->max_seg_size;
+		if (bouncesz > (host->max_blk_count * 512))
+			bouncesz = host->max_blk_count * 512;
+
+		if (bouncesz > 512) {
+			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mqrq_cur->bounce_buf) {
+				pr_warning("%s: unable to allocate bounce cur buffer\n",
+					   mmc_card_name(card));
+			}
+			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mqrq_prev->bounce_buf) {
+				pr_warning("%s: unable to allocate bounce prev buffer\n",
+					   mmc_card_name(card));
+				kfree(mqrq_cur->bounce_buf);
+				mqrq_cur->bounce_buf = NULL;
+			}
+		}
+
+		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
+			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
+			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
+			blk_queue_max_segments(mq->queue, bouncesz / 512);
+			blk_queue_max_segment_size(mq->queue, bouncesz);
+
+			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+			if (ret)
+				goto cleanup_queue;
+
+			mqrq_cur->bounce_sg =
+				mmc_alloc_sg(bouncesz / 512, &ret);
+			if (ret)
+				goto cleanup_queue;
+
+			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+			if (ret)
+				goto cleanup_queue;
+
+			mqrq_prev->bounce_sg =
+				mmc_alloc_sg(bouncesz / 512, &ret);
+			if (ret)
+				goto cleanup_queue;
+		}
+	}
+#endif
+
+	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
+		blk_queue_bounce_limit(mq->queue, limit);
+		blk_queue_max_hw_sectors(mq->queue,
+			min(host->max_blk_count, host->max_req_size / 512));
+		blk_queue_max_segments(mq->queue, host->max_segs);
+		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
+		if (ret)
+			goto cleanup_queue;
+
+		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+		if (ret)
+			goto cleanup_queue;
+	}
+
+	sema_init(&mq->thread_sem, 1);
+
+	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
+		host->index, subname ? subname : "");
+
+	if (IS_ERR(mq->thread)) {
+		ret = PTR_ERR(mq->thread);
+		goto free_bounce_sg;
+	}
+
+	return 0;
+ free_bounce_sg:
+	kfree(mqrq_cur->bounce_sg);
+	mqrq_cur->bounce_sg = NULL;
+	kfree(mqrq_prev->bounce_sg);
+	mqrq_prev->bounce_sg = NULL;
+
+ cleanup_queue:
+	kfree(mqrq_cur->sg);
+	mqrq_cur->sg = NULL;
+	kfree(mqrq_cur->bounce_buf);
+	mqrq_cur->bounce_buf = NULL;
+
+	kfree(mqrq_prev->sg);
+	mqrq_prev->sg = NULL;
+	kfree(mqrq_prev->bounce_buf);
+	mqrq_prev->bounce_buf = NULL;
+
+	blk_cleanup_queue(mq->queue);
+	return ret;
+}
+
+void mmc_cleanup_queue(struct mmc_queue *mq)
+{
+	struct request_queue *q = mq->queue;
+	unsigned long flags;
+	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
+
+	/* Make sure the queue isn't suspended, as that will deadlock */
+	mmc_queue_resume(mq);
+
+	/* Then terminate our worker thread */
+	kthread_stop(mq->thread);
+
+	/* Empty the queue */
+	spin_lock_irqsave(q->queue_lock, flags);
+	q->queuedata = NULL;
+	blk_start_queue(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	kfree(mqrq_cur->bounce_sg);
+	mqrq_cur->bounce_sg = NULL;
+
+	kfree(mqrq_cur->sg);
+	mqrq_cur->sg = NULL;
+
+	kfree(mqrq_cur->bounce_buf);
+	mqrq_cur->bounce_buf = NULL;
+
+	kfree(mqrq_prev->bounce_sg);
+	mqrq_prev->bounce_sg = NULL;
+
+	kfree(mqrq_prev->sg);
+	mqrq_prev->sg = NULL;
+
+	kfree(mqrq_prev->bounce_buf);
+	mqrq_prev->bounce_buf = NULL;
+
+	mq->card = NULL;
+}
+EXPORT_SYMBOL(mmc_cleanup_queue);
+
+/**
+ * mmc_queue_suspend - suspend an MMC request queue
+ * @mq: MMC queue to suspend
+ *
+ * Stop the block request queue, and wait for our thread to
+ * complete any outstanding requests.  This ensures that we
+ * won't suspend while a request is being processed.
+ */
+void mmc_queue_suspend(struct mmc_queue *mq)
+{
+	struct request_queue *q = mq->queue;
+	unsigned long flags;
+
+	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
+		mq->flags |= MMC_QUEUE_SUSPENDED;
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_stop_queue(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+
+		down(&mq->thread_sem);
+	}
+}
+
+/**
+ * mmc_queue_resume - resume a previously suspended MMC request queue
+ * @mq: MMC queue to resume
+ */
+void mmc_queue_resume(struct mmc_queue *mq)
+{
+	struct request_queue *q = mq->queue;
+	unsigned long flags;
+
+	if (mq->flags & MMC_QUEUE_SUSPENDED) {
+		mq->flags &= ~MMC_QUEUE_SUSPENDED;
+
+		up(&mq->thread_sem);
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_start_queue(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+}
+
+/*
+ * Prepare the sg list(s) to be handed off to the host driver
+ */
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
+{
+	unsigned int sg_len;
+	size_t buflen;
+	struct scatterlist *sg;
+	int i;
+
+	if (!mqrq->bounce_buf)
+		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+
+	BUG_ON(!mqrq->bounce_sg);
+
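+	/*
+	 * Map the request into the bounce scatterlist, then present the
+	 * host with a single entry covering the contiguous bounce buffer.
+	 */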
+	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+
+	mqrq->bounce_sg_len = sg_len;
+
+	buflen = 0;
+	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
+		buflen += sg->length;
+
+	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
+
+	return 1;
+}
+
+/*
+ * If writing, bounce the data to the buffer before the request
+ * is sent to the host driver
+ */
+void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
+{
+	if (!mqrq->bounce_buf)
+		return;
+
+	if (rq_data_dir(mqrq->req) != WRITE)
+		return;
+
+	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+		mqrq->bounce_buf, mqrq->sg[0].length);
+}
+
+/*
+ * If reading, bounce the data from the buffer after the request
+ * has been handled by the host driver
+ */
+void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
+{
+	if (!mqrq->bounce_buf)
+		return;
+
+	if (rq_data_dir(mqrq->req) != READ)
+		return;
+
+	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+		mqrq->bounce_buf, mqrq->sg[0].length);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/mmc/card/queue.h b/ap/os/linux/linux-3.4.x/drivers/mmc/card/queue.h
new file mode 100644
index 0000000..d2a1eb4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/mmc/card/queue.h
@@ -0,0 +1,49 @@
+#ifndef MMC_QUEUE_H
+#define MMC_QUEUE_H
+
+struct request;
+struct task_struct;
+
+struct mmc_blk_request {
+	struct mmc_request	mrq;
+	struct mmc_command	sbc;
+	struct mmc_command	cmd;
+	struct mmc_command	stop;
+	struct mmc_data		data;
+};
+
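+/*
+ * Per-request context.  mmc_queue embeds two of these so that one
+ * request can be prepared while the previous one is being processed.
+ */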
+struct mmc_queue_req {
+	struct request		*req;
+	struct mmc_blk_request	brq;
+	struct scatterlist	*sg;
+	char			*bounce_buf;
+	struct scatterlist	*bounce_sg;
+	unsigned int		bounce_sg_len;
+	struct mmc_async_req	mmc_active;
+};
+
+struct mmc_queue {
+	struct mmc_card		*card;
+	struct task_struct	*thread;
+	struct semaphore	thread_sem;
+	unsigned int		flags;
+	int			(*issue_fn)(struct mmc_queue *, struct request *);
+	void			*data;
+	struct request_queue	*queue;
+	struct mmc_queue_req	mqrq[2];
+	struct mmc_queue_req	*mqrq_cur;
+	struct mmc_queue_req	*mqrq_prev;
+};
+
+extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
+			  const char *);
+extern void mmc_cleanup_queue(struct mmc_queue *);
+extern void mmc_queue_suspend(struct mmc_queue *);
+extern void mmc_queue_resume(struct mmc_queue *);
+
+extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
+				     struct mmc_queue_req *);
+extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
+extern void mmc_queue_bounce_post(struct mmc_queue_req *);
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/mmc/card/sdio_uart.c b/ap/os/linux/linux-3.4.x/drivers/mmc/card/sdio_uart.c
new file mode 100644
index 0000000..5a2cbfa
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/mmc/card/sdio_uart.c
@@ -0,0 +1,1219 @@
+/*
+ * linux/drivers/mmc/card/sdio_uart.c - SDIO UART/GPS driver
+ *
+ * Based on drivers/serial/8250.c and drivers/serial/serial_core.c
+ * by Russell King.
+ *
+ * Author:	Nicolas Pitre
+ * Created:	June 15, 2007
+ * Copyright:	MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+/*
+ * Note: Although this driver assumes a 16550A-like UART implementation,
+ * it is not possible to leverage the common 8250/16550 driver, nor the
+ * core UART infrastructure, as they assume direct access to the hardware
+ * registers, often under a spinlock.  This is not possible in the SDIO
+ * context as SDIO access functions must be able to sleep.
+ *
+ * Because we need to lock the SDIO host to ensure exclusive access to
+ * the card, we simply rely on that lock to also serialize concurrent
+ * access to the same port.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/serial_reg.h>
+#include <linux/circ_buf.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+
+#define UART_NR		8	/* Number of UARTs this driver can handle */
+
+
+#define FIFO_SIZE	PAGE_SIZE
+#define WAKEUP_CHARS	256
+
+struct uart_icount {
+	__u32	cts;
+	__u32	dsr;
+	__u32	rng;
+	__u32	dcd;
+	__u32	rx;
+	__u32	tx;
+	__u32	frame;
+	__u32	overrun;
+	__u32	parity;
+	__u32	brk;
+};
+
+struct sdio_uart_port {
+	struct tty_port		port;
+	struct kref		kref;
+	struct tty_struct	*tty;
+	unsigned int		index;
+	struct sdio_func	*func;
+	struct mutex		func_lock;
+	struct task_struct	*in_sdio_uart_irq;
+	unsigned int		regs_offset;
+	struct kfifo		xmit_fifo;
+	spinlock_t		write_lock;
+	struct uart_icount	icount;
+	unsigned int		uartclk;
+	unsigned int		mctrl;
+	unsigned int		rx_mctrl;
+	unsigned int		read_status_mask;
+	unsigned int		ignore_status_mask;
+	unsigned char		x_char;
+	unsigned char           ier;
+	unsigned char           lcr;
+};
+
+static struct sdio_uart_port *sdio_uart_table[UART_NR];
+static DEFINE_SPINLOCK(sdio_uart_table_lock);
+
+static int sdio_uart_add_port(struct sdio_uart_port *port)
+{
+	int index, ret = -EBUSY;
+
+	kref_init(&port->kref);
+	mutex_init(&port->func_lock);
+	spin_lock_init(&port->write_lock);
+	if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL))
+		return -ENOMEM;
+
+	spin_lock(&sdio_uart_table_lock);
+	for (index = 0; index < UART_NR; index++) {
+		if (!sdio_uart_table[index]) {
+			port->index = index;
+			sdio_uart_table[index] = port;
+			ret = 0;
+			break;
+		}
+	}
+	spin_unlock(&sdio_uart_table_lock);
+
+	return ret;
+}
+
+static struct sdio_uart_port *sdio_uart_port_get(unsigned index)
+{
+	struct sdio_uart_port *port;
+
+	if (index >= UART_NR)
+		return NULL;
+
+	spin_lock(&sdio_uart_table_lock);
+	port = sdio_uart_table[index];
+	if (port)
+		kref_get(&port->kref);
+	spin_unlock(&sdio_uart_table_lock);
+
+	return port;
+}
+
+static void sdio_uart_port_destroy(struct kref *kref)
+{
+	struct sdio_uart_port *port =
+		container_of(kref, struct sdio_uart_port, kref);
+	kfifo_free(&port->xmit_fifo);
+	kfree(port);
+}
+
+static void sdio_uart_port_put(struct sdio_uart_port *port)
+{
+	kref_put(&port->kref, sdio_uart_port_destroy);
+}
+
+static void sdio_uart_port_remove(struct sdio_uart_port *port)
+{
+	struct sdio_func *func;
+	struct tty_struct *tty;
+
+	BUG_ON(sdio_uart_table[port->index] != port);
+
+	spin_lock(&sdio_uart_table_lock);
+	sdio_uart_table[port->index] = NULL;
+	spin_unlock(&sdio_uart_table_lock);
+
+	/*
+	 * We're killing a port that potentially still is in use by
+	 * the tty layer. Be careful to prevent any further access
+	 * to the SDIO function and arrange for the tty layer to
+	 * give up on that port ASAP.
+	 * Beware: the lock ordering is critical.
+	 */
+	mutex_lock(&port->port.mutex);
+	mutex_lock(&port->func_lock);
+	func = port->func;
+	sdio_claim_host(func);
+	port->func = NULL;
+	mutex_unlock(&port->func_lock);
+	tty = tty_port_tty_get(&port->port);
+	/* FIXME: tty_hangup() is asynchronous, so is this safe as-is? */
+	if (tty) {
+		tty_hangup(tty);
+		tty_kref_put(tty);
+	}
+	mutex_unlock(&port->port.mutex);
+	sdio_release_irq(func);
+	sdio_disable_func(func);
+	sdio_release_host(func);
+
+	sdio_uart_port_put(port);
+}
+
+static int sdio_uart_claim_func(struct sdio_uart_port *port)
+{
+	mutex_lock(&port->func_lock);
+	if (unlikely(!port->func)) {
+		mutex_unlock(&port->func_lock);
+		return -ENODEV;
+	}
+	if (likely(port->in_sdio_uart_irq != current))
+		sdio_claim_host(port->func);
+	mutex_unlock(&port->func_lock);
+	return 0;
+}
+
+static inline void sdio_uart_release_func(struct sdio_uart_port *port)
+{
+	if (likely(port->in_sdio_uart_irq != current))
+		sdio_release_host(port->func);
+}
+
+static inline unsigned int sdio_in(struct sdio_uart_port *port, int offset)
+{
+	unsigned char c;
+	c = sdio_readb(port->func, port->regs_offset + offset, NULL);
+	return c;
+}
+
+static inline void sdio_out(struct sdio_uart_port *port, int offset, int value)
+{
+	sdio_writeb(port->func, value, port->regs_offset + offset, NULL);
+}
+
+static unsigned int sdio_uart_get_mctrl(struct sdio_uart_port *port)
+{
+	unsigned char status;
+	unsigned int ret;
+
+	/* FIXME: What stops this losing the delta bits and breaking
+	   sdio_uart_check_modem_status ? */
+	status = sdio_in(port, UART_MSR);
+
+	ret = 0;
+	if (status & UART_MSR_DCD)
+		ret |= TIOCM_CAR;
+	if (status & UART_MSR_RI)
+		ret |= TIOCM_RNG;
+	if (status & UART_MSR_DSR)
+		ret |= TIOCM_DSR;
+	if (status & UART_MSR_CTS)
+		ret |= TIOCM_CTS;
+	return ret;
+}
+
+static void sdio_uart_write_mctrl(struct sdio_uart_port *port,
+				  unsigned int mctrl)
+{
+	unsigned char mcr = 0;
+
+	if (mctrl & TIOCM_RTS)
+		mcr |= UART_MCR_RTS;
+	if (mctrl & TIOCM_DTR)
+		mcr |= UART_MCR_DTR;
+	if (mctrl & TIOCM_OUT1)
+		mcr |= UART_MCR_OUT1;
+	if (mctrl & TIOCM_OUT2)
+		mcr |= UART_MCR_OUT2;
+	if (mctrl & TIOCM_LOOP)
+		mcr |= UART_MCR_LOOP;
+
+	sdio_out(port, UART_MCR, mcr);
+}
+
+static inline void sdio_uart_update_mctrl(struct sdio_uart_port *port,
+					  unsigned int set, unsigned int clear)
+{
+	unsigned int old;
+
+	old = port->mctrl;
+	port->mctrl = (old & ~clear) | set;
+	if (old != port->mctrl)
+		sdio_uart_write_mctrl(port, port->mctrl);
+}
+
+#define sdio_uart_set_mctrl(port, x)	sdio_uart_update_mctrl(port, x, 0)
+#define sdio_uart_clear_mctrl(port, x)	sdio_uart_update_mctrl(port, 0, x)
+
+static void sdio_uart_change_speed(struct sdio_uart_port *port,
+				   struct ktermios *termios,
+				   struct ktermios *old)
+{
+	unsigned char cval, fcr = 0;
+	unsigned int baud, quot;
+
+	switch (termios->c_cflag & CSIZE) {
+	case CS5:
+		cval = UART_LCR_WLEN5;
+		break;
+	case CS6:
+		cval = UART_LCR_WLEN6;
+		break;
+	case CS7:
+		cval = UART_LCR_WLEN7;
+		break;
+	default:
+	case CS8:
+		cval = UART_LCR_WLEN8;
+		break;
+	}
+
+	if (termios->c_cflag & CSTOPB)
+		cval |= UART_LCR_STOP;
+	if (termios->c_cflag & PARENB)
+		cval |= UART_LCR_PARITY;
+	if (!(termios->c_cflag & PARODD))
+		cval |= UART_LCR_EPAR;
+
+	for (;;) {
+		baud = tty_termios_baud_rate(termios);
+		if (baud == 0)
+			baud = 9600;  /* Special case: B0 rate. */
+		if (baud <= port->uartclk)
+			break;
+		/*
+		 * The requested rate is higher than the UART clock can
+		 * produce.  Try again with the old baud rate if possible,
+		 * otherwise default to 9600.
+		 */
+		termios->c_cflag &= ~CBAUD;
+		if (old) {
+			termios->c_cflag |= old->c_cflag & CBAUD;
+			old = NULL;
+		} else
+			termios->c_cflag |= B9600;
+	}
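+	/* Baud rate divisor, rounded to the nearest integer. */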
+	quot = (2 * port->uartclk + baud) / (2 * baud);
+
+	if (baud < 2400)
+		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
+	else
+		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;
+
+	port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+	if (termios->c_iflag & INPCK)
+		port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+	if (termios->c_iflag & (BRKINT | PARMRK))
+		port->read_status_mask |= UART_LSR_BI;
+
+	/*
+	 * Characters to ignore
+	 */
+	port->ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+	if (termios->c_iflag & IGNBRK) {
+		port->ignore_status_mask |= UART_LSR_BI;
+		/*
+		 * If we're ignoring parity and break indicators,
+		 * ignore overruns too (for real raw support).
+		 */
+		if (termios->c_iflag & IGNPAR)
+			port->ignore_status_mask |= UART_LSR_OE;
+	}
+
+	/*
+	 * ignore all characters if CREAD is not set
+	 */
+	if ((termios->c_cflag & CREAD) == 0)
+		port->ignore_status_mask |= UART_LSR_DR;
+
+	/*
+	 * CTS flow control flag and modem status interrupts
+	 */
+	port->ier &= ~UART_IER_MSI;
+	if ((termios->c_cflag & CRTSCTS) || !(termios->c_cflag & CLOCAL))
+		port->ier |= UART_IER_MSI;
+
+	port->lcr = cval;
+
+	sdio_out(port, UART_IER, port->ier);
+	sdio_out(port, UART_LCR, cval | UART_LCR_DLAB);
+	sdio_out(port, UART_DLL, quot & 0xff);
+	sdio_out(port, UART_DLM, quot >> 8);
+	sdio_out(port, UART_LCR, cval);
+	sdio_out(port, UART_FCR, fcr);
+
+	sdio_uart_write_mctrl(port, port->mctrl);
+}
+
+static void sdio_uart_start_tx(struct sdio_uart_port *port)
+{
+	if (!(port->ier & UART_IER_THRI)) {
+		port->ier |= UART_IER_THRI;
+		sdio_out(port, UART_IER, port->ier);
+	}
+}
+
+static void sdio_uart_stop_tx(struct sdio_uart_port *port)
+{
+	if (port->ier & UART_IER_THRI) {
+		port->ier &= ~UART_IER_THRI;
+		sdio_out(port, UART_IER, port->ier);
+	}
+}
+
+static void sdio_uart_stop_rx(struct sdio_uart_port *port)
+{
+	port->ier &= ~UART_IER_RLSI;
+	port->read_status_mask &= ~UART_LSR_DR;
+	sdio_out(port, UART_IER, port->ier);
+}
+
+static void sdio_uart_receive_chars(struct sdio_uart_port *port,
+				    unsigned int *status)
+{
+	struct tty_struct *tty = tty_port_tty_get(&port->port);
+	unsigned int ch, flag;
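+	/*
+	 * Bound the work done per call so a saturated receiver cannot
+	 * monopolize the SDIO interrupt thread.
+	 */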
+	int max_count = 256;
+
+	do {
+		ch = sdio_in(port, UART_RX);
+		flag = TTY_NORMAL;
+		port->icount.rx++;
+
+		if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
+					UART_LSR_FE | UART_LSR_OE))) {
+			/*
+			 * For statistics only
+			 */
+			if (*status & UART_LSR_BI) {
+				*status &= ~(UART_LSR_FE | UART_LSR_PE);
+				port->icount.brk++;
+			} else if (*status & UART_LSR_PE)
+				port->icount.parity++;
+			else if (*status & UART_LSR_FE)
+				port->icount.frame++;
+			if (*status & UART_LSR_OE)
+				port->icount.overrun++;
+
+			/*
+			 * Mask off conditions which should be ignored.
+			 */
+			*status &= port->read_status_mask;
+			if (*status & UART_LSR_BI)
+				flag = TTY_BREAK;
+			else if (*status & UART_LSR_PE)
+				flag = TTY_PARITY;
+			else if (*status & UART_LSR_FE)
+				flag = TTY_FRAME;
+		}
+
+		if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0)
+			if (tty)
+				tty_insert_flip_char(tty, ch, flag);
+
+		/*
+		 * Overrun is special.  Since it's reported immediately,
+		 * it doesn't affect the current character.
+		 */
+		if (*status & ~port->ignore_status_mask & UART_LSR_OE)
+			if (tty)
+				tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+
+		*status = sdio_in(port, UART_LSR);
+	} while ((*status & UART_LSR_DR) && (max_count-- > 0));
+	if (tty) {
+		tty_flip_buffer_push(tty);
+		tty_kref_put(tty);
+	}
+}
+
+static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
+{
+	struct kfifo *xmit = &port->xmit_fifo;
+	int count;
+	struct tty_struct *tty;
+	u8 iobuf[16];
+	int len;
+
+	if (port->x_char) {
+		sdio_out(port, UART_TX, port->x_char);
+		port->icount.tx++;
+		port->x_char = 0;
+		return;
+	}
+
+	tty = tty_port_tty_get(&port->port);
+
+	if (tty == NULL || !kfifo_len(xmit) ||
+				tty->stopped || tty->hw_stopped) {
+		sdio_uart_stop_tx(port);
+		tty_kref_put(tty);
+		return;
+	}
+
+	len = kfifo_out_locked(xmit, iobuf, 16, &port->write_lock);
+	for (count = 0; count < len; count++) {
+		sdio_out(port, UART_TX, iobuf[count]);
+		port->icount.tx++;
+	}
+
+	len = kfifo_len(xmit);
+	if (len < WAKEUP_CHARS) {
+		tty_wakeup(tty);
+		if (len == 0)
+			sdio_uart_stop_tx(port);
+	}
+	tty_kref_put(tty);
+}
+
+static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
+{
+	int status;
+	struct tty_struct *tty;
+
+	status = sdio_in(port, UART_MSR);
+
+	if ((status & UART_MSR_ANY_DELTA) == 0)
+		return;
+
+	if (status & UART_MSR_TERI)
+		port->icount.rng++;
+	if (status & UART_MSR_DDSR)
+		port->icount.dsr++;
+	if (status & UART_MSR_DDCD) {
+		port->icount.dcd++;
+		/* DCD raised - wake up anyone waiting in open */
+		if (status & UART_MSR_DCD)
+			wake_up_interruptible(&port->port.open_wait);
+		else {
+			/* DCD drop - hang up if tty attached */
+			tty = tty_port_tty_get(&port->port);
+			if (tty) {
+				tty_hangup(tty);
+				tty_kref_put(tty);
+			}
+		}
+	}
+	if (status & UART_MSR_DCTS) {
+		port->icount.cts++;
+		tty = tty_port_tty_get(&port->port);
+		if (tty && (tty->termios->c_cflag & CRTSCTS)) {
+			int cts = (status & UART_MSR_CTS);
+			if (tty->hw_stopped) {
+				if (cts) {
+					tty->hw_stopped = 0;
+					sdio_uart_start_tx(port);
+					tty_wakeup(tty);
+				}
+			} else {
+				if (!cts) {
+					tty->hw_stopped = 1;
+					sdio_uart_stop_tx(port);
+				}
+			}
+		}
+		tty_kref_put(tty);
+	}
+}
+
+/*
+ * This handles the interrupt from one port.
+ */
+static void sdio_uart_irq(struct sdio_func *func)
+{
+	struct sdio_uart_port *port = sdio_get_drvdata(func);
+	unsigned int iir, lsr;
+
+	/*
+	 * In a few places sdio_uart_irq() is called directly instead of
+	 * waiting for the actual interrupt to be raised and the SDIO IRQ
+	 * thread scheduled in order to reduce latency.  However, some
+	 * interaction with the tty core may end up calling us back
+	 * (serial echo, flow control, etc.) through those same places
+	 * causing undesirable effects.  Let's stop the recursion here.
+	 */
+	if (unlikely(port->in_sdio_uart_irq == current))
+		return;
+
+	iir = sdio_in(port, UART_IIR);
+	if (iir & UART_IIR_NO_INT)
+		return;
+
+	port->in_sdio_uart_irq = current;
+	lsr = sdio_in(port, UART_LSR);
+	if (lsr & UART_LSR_DR)
+		sdio_uart_receive_chars(port, &lsr);
+	sdio_uart_check_modem_status(port);
+	if (lsr & UART_LSR_THRE)
+		sdio_uart_transmit_chars(port);
+	port->in_sdio_uart_irq = NULL;
+}
+
+static int uart_carrier_raised(struct tty_port *tport)
+{
+	struct sdio_uart_port *port =
+			container_of(tport, struct sdio_uart_port, port);
+	unsigned int ret = sdio_uart_claim_func(port);
+	if (ret)	/* Missing hardware shouldn't block for carrier */
+		return 1;
+	ret = sdio_uart_get_mctrl(port);
+	sdio_uart_release_func(port);
+	if (ret & TIOCM_CAR)
+		return 1;
+	return 0;
+}
+
+/**
+ *	uart_dtr_rts		-	 port helper to set uart signals
+ *	@tport: tty port to be updated
+ *	@onoff: set to turn on DTR/RTS
+ *
+ *	Called by the tty port helpers when the modem signals need to be
+ *	adjusted during an open, close and hangup.
+ */
+
+static void uart_dtr_rts(struct tty_port *tport, int onoff)
+{
+	struct sdio_uart_port *port =
+			container_of(tport, struct sdio_uart_port, port);
+	int ret = sdio_uart_claim_func(port);
+	if (ret)
+		return;
+	if (onoff == 0)
+		sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+	else
+		sdio_uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+	sdio_uart_release_func(port);
+}
+
+/**
+ *	sdio_uart_activate	-	start up hardware
+ *	@tport: tty port to activate
+ *	@tty: tty bound to this port
+ *
+ *	Activate a tty port. The port locking guarantees us this will be
+ *	run exactly once per set of opens, and if successful will see the
+ *	shutdown method run exactly once to match. Start up and shutdown are
+ *	protected from each other by the internal locking and will not run
+ *	at the same time even during a hangup event.
+ *
+ *	If we successfully start up the port we take an extra kref as we
+ *	will keep it around until shutdown when the kref is dropped.
+ */
+
+static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
+{
+	struct sdio_uart_port *port =
+			container_of(tport, struct sdio_uart_port, port);
+	int ret;
+
+	/*
+	 * Set the TTY IO error marker - we will only clear this
+	 * once we have successfully opened the port.
+	 */
+	set_bit(TTY_IO_ERROR, &tty->flags);
+
+	kfifo_reset(&port->xmit_fifo);
+
+	ret = sdio_uart_claim_func(port);
+	if (ret)
+		return ret;
+	ret = sdio_enable_func(port->func);
+	if (ret)
+		goto err1;
+	ret = sdio_claim_irq(port->func, sdio_uart_irq);
+	if (ret)
+		goto err2;
+
+	/*
+	 * Clear the FIFO buffers and disable them.
+	 * (they will be re-enabled in sdio_uart_change_speed())
+	 */
+	sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO);
+	sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO |
+		       UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+	sdio_out(port, UART_FCR, 0);
+
+	/*
+	 * Clear the interrupt registers.
+	 */
+	(void) sdio_in(port, UART_LSR);
+	(void) sdio_in(port, UART_RX);
+	(void) sdio_in(port, UART_IIR);
+	(void) sdio_in(port, UART_MSR);
+
+	/*
+	 * Now, initialize the UART
+	 */
+	sdio_out(port, UART_LCR, UART_LCR_WLEN8);
+
+	port->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE |
+		    UART_IER_UUE;
+	port->mctrl = TIOCM_OUT2;
+
+	sdio_uart_change_speed(port, tty->termios, NULL);
+
+	if (tty->termios->c_cflag & CBAUD)
+		sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
+
+	if (tty->termios->c_cflag & CRTSCTS)
+		if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))
+			tty->hw_stopped = 1;
+
+	clear_bit(TTY_IO_ERROR, &tty->flags);
+
+	/* Kick the IRQ handler once while we're still holding the host lock */
+	sdio_uart_irq(port->func);
+
+	sdio_uart_release_func(port);
+	return 0;
+
+err2:
+	sdio_disable_func(port->func);
+err1:
+	sdio_uart_release_func(port);
+	return ret;
+}
+
+/**
+ *	sdio_uart_shutdown	-	stop hardware
+ *	@tport: tty port to shut down
+ *
+ *	Deactivate a tty port. The port locking guarantees us this will be
+ *	run only if a successful matching activate already ran. The two are
+ *	protected from each other by the internal locking and will not run
+ *	at the same time even during a hangup event.
+ */
+
+static void sdio_uart_shutdown(struct tty_port *tport)
+{
+	struct sdio_uart_port *port =
+			container_of(tport, struct sdio_uart_port, port);
+	int ret;
+
+	ret = sdio_uart_claim_func(port);
+	if (ret)
+		return;
+
+	sdio_uart_stop_rx(port);
+
+	/* Disable interrupts from this port */
+	sdio_release_irq(port->func);
+	port->ier = 0;
+	sdio_out(port, UART_IER, 0);
+
+	sdio_uart_clear_mctrl(port, TIOCM_OUT2);
+
+	/* Disable break condition and FIFOs. */
+	port->lcr &= ~UART_LCR_SBC;
+	sdio_out(port, UART_LCR, port->lcr);
+	sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO |
+				 UART_FCR_CLEAR_RCVR |
+				 UART_FCR_CLEAR_XMIT);
+	sdio_out(port, UART_FCR, 0);
+
+	sdio_disable_func(port->func);
+
+	sdio_uart_release_func(port);
+}
+
+/**
+ *	sdio_uart_install	-	install method
+ *	@driver: the driver in use (sdio_uart in our case)
+ *	@tty: the tty being bound
+ *
+ *	Look up and bind the tty and the driver together. Initialize
+ *	any needed private data (in our case the termios)
+ */
+
+static int sdio_uart_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	int idx = tty->index;
+	struct sdio_uart_port *port = sdio_uart_port_get(idx);
+	int ret = tty_standard_install(driver, tty);
+
+	if (ret == 0)
+		/* Consumes the reference that sdio_uart_port_get() provided */
+		tty->driver_data = port;
+	else
+		sdio_uart_port_put(port);
+	return ret;
+}
+
+/**
+ *	sdio_uart_cleanup	-	called on the last tty kref drop
+ *	@tty: the tty being destroyed
+ *
+ *	Called asynchronously when the last reference to the tty is dropped.
+ *	We cannot destroy the tty->driver_data port kref until this point
+ */
+
+static void sdio_uart_cleanup(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	tty->driver_data = NULL;	/* Bug trap */
+	sdio_uart_port_put(port);
+}
+
+/*
+ *	Open/close/hangup is now entirely boilerplate
+ */
+
+static int sdio_uart_open(struct tty_struct *tty, struct file *filp)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	return tty_port_open(&port->port, tty, filp);
+}
+
+static void sdio_uart_close(struct tty_struct *tty, struct file *filp)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	tty_port_close(&port->port, tty, filp);
+}
+
+static void sdio_uart_hangup(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	tty_port_hangup(&port->port);
+}
+
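+/*
+ * Queue data for transmission: bytes go into the xmit kfifo, and if the
+ * THR interrupt is not already enabled the function is claimed so the
+ * transmitter can be started and the IRQ handler kicked once by hand.
+ */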
+static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
+			   int count)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	int ret;
+
+	if (!port->func)
+		return -ENODEV;
+
+	ret = kfifo_in_locked(&port->xmit_fifo, buf, count, &port->write_lock);
+	if (!(port->ier & UART_IER_THRI)) {
+		int err = sdio_uart_claim_func(port);
+		if (!err) {
+			sdio_uart_start_tx(port);
+			sdio_uart_irq(port->func);
+			sdio_uart_release_func(port);
+		} else
+			ret = err;
+	}
+
+	return ret;
+}
+
+static int sdio_uart_write_room(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	return FIFO_SIZE - kfifo_len(&port->xmit_fifo);
+}
+
+static int sdio_uart_chars_in_buffer(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	return kfifo_len(&port->xmit_fifo);
+}
+
+static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+
+	port->x_char = ch;
+	if (ch && !(port->ier & UART_IER_THRI)) {
+		if (sdio_uart_claim_func(port) != 0)
+			return;
+		sdio_uart_start_tx(port);
+		sdio_uart_irq(port->func);
+		sdio_uart_release_func(port);
+	}
+}
+
+static void sdio_uart_throttle(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+
+	if (!I_IXOFF(tty) && !(tty->termios->c_cflag & CRTSCTS))
+		return;
+
+	if (sdio_uart_claim_func(port) != 0)
+		return;
+
+	if (I_IXOFF(tty)) {
+		port->x_char = STOP_CHAR(tty);
+		sdio_uart_start_tx(port);
+	}
+
+	if (tty->termios->c_cflag & CRTSCTS)
+		sdio_uart_clear_mctrl(port, TIOCM_RTS);
+
+	sdio_uart_irq(port->func);
+	sdio_uart_release_func(port);
+}
+
+static void sdio_uart_unthrottle(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+
+	if (!I_IXOFF(tty) && !(tty->termios->c_cflag & CRTSCTS))
+		return;
+
+	if (sdio_uart_claim_func(port) != 0)
+		return;
+
+	if (I_IXOFF(tty)) {
+		if (port->x_char) {
+			port->x_char = 0;
+		} else {
+			port->x_char = START_CHAR(tty);
+			sdio_uart_start_tx(port);
+		}
+	}
+
+	if (tty->termios->c_cflag & CRTSCTS)
+		sdio_uart_set_mctrl(port, TIOCM_RTS);
+
+	sdio_uart_irq(port->func);
+	sdio_uart_release_func(port);
+}
+
+static void sdio_uart_set_termios(struct tty_struct *tty,
+						struct ktermios *old_termios)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	unsigned int cflag = tty->termios->c_cflag;
+
+	if (sdio_uart_claim_func(port) != 0)
+		return;
+
+	sdio_uart_change_speed(port, tty->termios, old_termios);
+
+	/* Handle transition to B0 status */
+	if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
+		sdio_uart_clear_mctrl(port, TIOCM_RTS | TIOCM_DTR);
+
+	/* Handle transition away from B0 status */
+	if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
+		unsigned int mask = TIOCM_DTR;
+		if (!(cflag & CRTSCTS) || !test_bit(TTY_THROTTLED, &tty->flags))
+			mask |= TIOCM_RTS;
+		sdio_uart_set_mctrl(port, mask);
+	}
+
+	/* Handle turning off CRTSCTS */
+	if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) {
+		tty->hw_stopped = 0;
+		sdio_uart_start_tx(port);
+	}
+
+	/* Handle turning on CRTSCTS */
+	if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
+		if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS)) {
+			tty->hw_stopped = 1;
+			sdio_uart_stop_tx(port);
+		}
+	}
+
+	sdio_uart_release_func(port);
+}
+
+static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	int result;
+
+	result = sdio_uart_claim_func(port);
+	if (result != 0)
+		return result;
+
+	if (break_state == -1)
+		port->lcr |= UART_LCR_SBC;
+	else
+		port->lcr &= ~UART_LCR_SBC;
+	sdio_out(port, UART_LCR, port->lcr);
+
+	sdio_uart_release_func(port);
+	return 0;
+}
+
+static int sdio_uart_tiocmget(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	int result;
+
+	result = sdio_uart_claim_func(port);
+	if (!result) {
+		result = port->mctrl | sdio_uart_get_mctrl(port);
+		sdio_uart_release_func(port);
+	}
+
+	return result;
+}
+
+static int sdio_uart_tiocmset(struct tty_struct *tty,
+			      unsigned int set, unsigned int clear)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	int result;
+
+	result = sdio_uart_claim_func(port);
+	if (!result) {
+		sdio_uart_update_mctrl(port, set, clear);
+		sdio_uart_release_func(port);
+	}
+
+	return result;
+}
+
+static int sdio_uart_proc_show(struct seq_file *m, void *v)
+{
+	int i;
+
+	seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n", "", "", "");
+	for (i = 0; i < UART_NR; i++) {
+		struct sdio_uart_port *port = sdio_uart_port_get(i);
+		if (port) {
+			seq_printf(m, "%d: uart:SDIO", i);
+			if (capable(CAP_SYS_ADMIN)) {
+				seq_printf(m, " tx:%d rx:%d",
+					      port->icount.tx, port->icount.rx);
+				if (port->icount.frame)
+					seq_printf(m, " fe:%d",
+						      port->icount.frame);
+				if (port->icount.parity)
+					seq_printf(m, " pe:%d",
+						      port->icount.parity);
+				if (port->icount.brk)
+					seq_printf(m, " brk:%d",
+						      port->icount.brk);
+				if (port->icount.overrun)
+					seq_printf(m, " oe:%d",
+						      port->icount.overrun);
+				if (port->icount.cts)
+					seq_printf(m, " cts:%d",
+						      port->icount.cts);
+				if (port->icount.dsr)
+					seq_printf(m, " dsr:%d",
+						      port->icount.dsr);
+				if (port->icount.rng)
+					seq_printf(m, " rng:%d",
+						      port->icount.rng);
+				if (port->icount.dcd)
+					seq_printf(m, " dcd:%d",
+						      port->icount.dcd);
+			}
+			sdio_uart_port_put(port);
+			seq_putc(m, '\n');
+		}
+	}
+	return 0;
+}
+
+static int sdio_uart_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, sdio_uart_proc_show, NULL);
+}
+
+static const struct file_operations sdio_uart_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= sdio_uart_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static const struct tty_port_operations sdio_uart_port_ops = {
+	.dtr_rts = uart_dtr_rts,
+	.carrier_raised = uart_carrier_raised,
+	.shutdown = sdio_uart_shutdown,
+	.activate = sdio_uart_activate,
+};
+
+static const struct tty_operations sdio_uart_ops = {
+	.open			= sdio_uart_open,
+	.close			= sdio_uart_close,
+	.write			= sdio_uart_write,
+	.write_room		= sdio_uart_write_room,
+	.chars_in_buffer	= sdio_uart_chars_in_buffer,
+	.send_xchar		= sdio_uart_send_xchar,
+	.throttle		= sdio_uart_throttle,
+	.unthrottle		= sdio_uart_unthrottle,
+	.set_termios		= sdio_uart_set_termios,
+	.hangup			= sdio_uart_hangup,
+	.break_ctl		= sdio_uart_break_ctl,
+	.tiocmget		= sdio_uart_tiocmget,
+	.tiocmset		= sdio_uart_tiocmset,
+	.install		= sdio_uart_install,
+	.cleanup		= sdio_uart_cleanup,
+	.proc_fops		= &sdio_uart_proc_fops,
+};
+
+static struct tty_driver *sdio_uart_tty_driver;
+
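+/*
+ * Bare UART-class functions are rejected for now: the driver does not
+ * yet know their basic register setup (hence the warning below), so
+ * only GPS-class cards, which carry vendor tuple 0x91 with a
+ * SUBTPL_SIOREG subtuple, are supported.  The subtuple layout, as
+ * parsed below:
+ *   data[1]    subtuple id (0 = SUBTPL_SIOREG)
+ *   data[2]    register ID,  data[3] expected ID
+ *   data[4..6] register block offset (24-bit little-endian)
+ *   data[7]    baud code; uartclk = code * 115200, 0 meaning 115200
+ *   data[8..9] divisor for 4800 baud (16-bit little-endian)
+ */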
+static int sdio_uart_probe(struct sdio_func *func,
+			   const struct sdio_device_id *id)
+{
+	struct sdio_uart_port *port;
+	int ret;
+
+	port = kzalloc(sizeof(struct sdio_uart_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	if (func->class == SDIO_CLASS_UART) {
+		pr_warning("%s: need info on UART class basic setup\n",
+		       sdio_func_id(func));
+		kfree(port);
+		return -ENOSYS;
+	} else if (func->class == SDIO_CLASS_GPS) {
+		/*
+		 * We need tuple 0x91.  It contains SUBTPL_SIOREG
+		 * and SUBTPL_RCVCAPS.
+		 */
+		struct sdio_func_tuple *tpl;
+		for (tpl = func->tuples; tpl; tpl = tpl->next) {
+			if (tpl->code != 0x91)
+				continue;
+			if (tpl->size < 10)
+				continue;
+			if (tpl->data[1] == 0)  /* SUBTPL_SIOREG */
+				break;
+		}
+		if (!tpl) {
+			pr_warning(
+       "%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
+			       sdio_func_id(func));
+			kfree(port);
+			return -EINVAL;
+		}
+		pr_debug("%s: Register ID = 0x%02x, Exp ID = 0x%02x\n",
+		       sdio_func_id(func), tpl->data[2], tpl->data[3]);
+		port->regs_offset = (tpl->data[4] << 0) |
+				    (tpl->data[5] << 8) |
+				    (tpl->data[6] << 16);
+		pr_debug("%s: regs offset = 0x%x\n",
+		       sdio_func_id(func), port->regs_offset);
+		port->uartclk = tpl->data[7] * 115200;
+		if (port->uartclk == 0)
+			port->uartclk = 115200;
+		pr_debug("%s: clk %d baudcode %u 4800-div %u\n",
+		       sdio_func_id(func), port->uartclk,
+		       tpl->data[7], tpl->data[8] | (tpl->data[9] << 8));
+	} else {
+		kfree(port);
+		return -EINVAL;
+	}
+
+	port->func = func;
+	sdio_set_drvdata(func, port);
+	tty_port_init(&port->port);
+	port->port.ops = &sdio_uart_port_ops;
+
+	ret = sdio_uart_add_port(port);
+	if (ret) {
+		kfree(port);
+	} else {
+		struct device *dev;
+		dev = tty_register_device(sdio_uart_tty_driver,
+						port->index, &func->dev);
+		if (IS_ERR(dev)) {
+			sdio_uart_port_remove(port);
+			ret = PTR_ERR(dev);
+		}
+	}
+
+	return ret;
+}
+
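+/*
+ * Unregister the device node first so that no new opens can race with
+ * the port teardown done by sdio_uart_port_remove().
+ */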
+static void sdio_uart_remove(struct sdio_func *func)
+{
+	struct sdio_uart_port *port = sdio_get_drvdata(func);
+
+	tty_unregister_device(sdio_uart_tty_driver, port->index);
+	sdio_uart_port_remove(port);
+}
+
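+/* Bind to any SDIO function advertising the standard UART or GPS class */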
+static const struct sdio_device_id sdio_uart_ids[] = {
+	{ SDIO_DEVICE_CLASS(SDIO_CLASS_UART)		},
+	{ SDIO_DEVICE_CLASS(SDIO_CLASS_GPS)		},
+	{ /* end: all zeroes */				},
+};
+
+MODULE_DEVICE_TABLE(sdio, sdio_uart_ids);
+
+static struct sdio_driver sdio_uart_driver = {
+	.probe		= sdio_uart_probe,
+	.remove		= sdio_uart_remove,
+	.name		= "sdio_uart",
+	.id_table	= sdio_uart_ids,
+};
+
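+/*
+ * Register the tty driver before the SDIO driver so that device nodes
+ * can be created as soon as probe runs; unwind in the reverse order on
+ * failure.  The default termios is 8N1 at 4800 baud, the conventional
+ * NMEA GPS rate.
+ */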
+static int __init sdio_uart_init(void)
+{
+	int ret;
+	struct tty_driver *tty_drv;
+
+	sdio_uart_tty_driver = tty_drv = alloc_tty_driver(UART_NR);
+	if (!tty_drv)
+		return -ENOMEM;
+
+	tty_drv->driver_name = "sdio_uart";
+	tty_drv->name = "ttySDIO";
+	tty_drv->major = 0;  /* dynamically allocated */
+	tty_drv->minor_start = 0;
+	tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
+	tty_drv->subtype = SERIAL_TYPE_NORMAL;
+	tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	tty_drv->init_termios = tty_std_termios;
+	tty_drv->init_termios.c_cflag = B4800 | CS8 | CREAD | HUPCL | CLOCAL;
+	tty_drv->init_termios.c_ispeed = 4800;
+	tty_drv->init_termios.c_ospeed = 4800;
+	tty_set_operations(tty_drv, &sdio_uart_ops);
+
+	ret = tty_register_driver(tty_drv);
+	if (ret)
+		goto err1;
+
+	ret = sdio_register_driver(&sdio_uart_driver);
+	if (ret)
+		goto err2;
+
+	return 0;
+
+err2:
+	tty_unregister_driver(tty_drv);
+err1:
+	put_tty_driver(tty_drv);
+	return ret;
+}
+
+static void __exit sdio_uart_exit(void)
+{
+	sdio_unregister_driver(&sdio_uart_driver);
+	tty_unregister_driver(sdio_uart_tty_driver);
+	put_tty_driver(sdio_uart_tty_driver);
+}
+
+module_init(sdio_uart_init);
+module_exit(sdio_uart_exit);
+
+MODULE_AUTHOR("Nicolas Pitre");
+MODULE_LICENSE("GPL");