ZTE's code, first commit

Change-Id: I9a04da59e459a9bc0d67f101f700d9d7dc8d681b
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/Kconfig b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/Kconfig
new file mode 100644
index 0000000..7351e4a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/Kconfig
@@ -0,0 +1,999 @@
+#
+# USB Gadget support on a system involves
+#    (a) a peripheral controller, and
+#    (b) the gadget driver using it.
+#
+# NOTE:  Gadget support ** DOES NOT ** depend on host-side CONFIG_USB !!
+#
+#  - Host systems (like PCs) need CONFIG_USB (with "A" jacks).
+#  - Peripherals (like PDAs) need CONFIG_USB_GADGET (with "B" jacks).
+#  - Some systems have both kinds of controllers.
+#
+# With help from a special transceiver and a "Mini-AB" jack, systems with
+# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
+#
+
+menuconfig USB_GADGET
+	tristate "USB Gadget Support"
+	select NLS
+	help
+	   USB is a master/slave protocol, organized with one master
+	   host (such as a PC) controlling up to 127 peripheral devices.
+	   The USB hardware is asymmetric, which makes it easier to set up:
+	   you can't connect a "to-the-host" connector to a peripheral.
+
+	   Linux can run in the host, or in the peripheral.  In both cases
+	   you need a low level bus controller driver, and some software
+	   talking to it.  Peripheral controllers are often discrete silicon,
+	   or are integrated with the CPU in a microcontroller.  The more
+	   familiar host side controllers have names like "EHCI", "OHCI",
+	   or "UHCI", and are usually integrated into southbridges on PC
+	   motherboards.
+
+	   Enable this configuration option if you want to run Linux inside
+	   a USB peripheral device.  Configure one hardware driver for your
+	   peripheral/device side bus controller, and a "gadget driver" for
+	   your peripheral protocol.  (If you use modular gadget drivers,
+	   you may configure more than one.)
+
+	   If in doubt, say "N" and don't enable these drivers; most people
+	   don't have this kind of hardware (except maybe inside Linux PDAs).
+
+	   For more information, see <http://www.linux-usb.org/gadget> and
+	   the kernel DocBook documentation for this API.
+
+if USB_GADGET
+
+config USB_GADGET_DEBUG
+	boolean "Debugging messages (DEVELOPMENT)"
+	depends on DEBUG_KERNEL
+	help
+	   Many controller and gadget drivers will print some debugging
+	   messages if you use this option to ask for those messages.
+
+	   Avoid enabling these messages, even if you're actively
+	   debugging such a driver.  Many drivers will emit so many
+	   messages that the driver timings are affected, which will
+	   either create new failure modes or remove the one you're
+	   trying to track down.  Never enable these messages for a
+	   production build.
+
+config USB_GADGET_DEBUG_FILES
+	boolean "Debugging information files (DEVELOPMENT)"
+	depends on PROC_FS
+	help
+	   Some of the drivers in the "gadget" framework can expose
+	   debugging information in files such as /proc/driver/udc
+	   (for a peripheral controller).  The information in these
+	   files may help when you're troubleshooting or bringing up a
+	   driver on a new board.   Enable these files by choosing "Y"
+	   here.  If in doubt, or to conserve kernel memory, say "N".
+
+config USB_GADGET_DEBUG_FS
+	boolean "Debugging information files in debugfs (DEVELOPMENT)"
+	depends on DEBUG_FS
+	help
+	   Some of the drivers in the "gadget" framework can expose
+	   debugging information in files under /sys/kernel/debug/.
+	   The information in these files may help when you're
+	   troubleshooting or bringing up a driver on a new board.
+	   Enable these files by choosing "Y" here.  If in doubt, or
+	   to conserve kernel memory, say "N".
+
+config USB_GADGET_VBUS_DRAW
+	int "Maximum VBUS Power usage (2-500 mA)"
+	range 2 500
+	default 2
+	help
+	   Some devices need to draw power from USB when they are
+	   configured, perhaps to operate circuitry or to recharge
+	   batteries.  This is in addition to any local power supply,
+	   such as an AC adapter or batteries.
+
+	   Enter the maximum power your device draws through USB, in
+	   milliAmperes.  The permitted range of values is 2 - 500 mA;
+	   0 mA would be legal, but can make some hosts misbehave.
+
+	   This value will be used except for system-specific gadget
+	   drivers that have more specific information.
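+
+# Illustrative example (not from the original source): a bus-powered gadget that
+# may draw the full bus budget would set CONFIG_USB_GADGET_VBUS_DRAW=500, while a
+# self-powered device that only signals its presence can keep the default of 2.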
+
+config USB_GADGET_STORAGE_NUM_BUFFERS
+	int "Number of storage pipeline buffers"
+	range 2 4
+	default 2
+	help
+	   Usually 2 buffers are enough to establish a good buffering
+	   pipeline. The number may be increased to compensate for bursty
+	   VFS behaviour. For instance, CPU wake-up latencies may make the
+	   VFS appear bursty on a system with an on-demand CPU governor,
+	   especially if DMA is doing IO to offload the CPU. In this case
+	   the CPU will often go into power save and spin up occasionally
+	   to move data within the VFS.
+	   If USB_GADGET_DEBUG_FILES is selected, this value may also be
+	   set by a module parameter.
+	   If unsure, say 2.
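+
+# Illustrative example (module parameter name assumed, not taken from this file):
+# with USB_GADGET_DEBUG_FILES enabled the buffer count can also be set at load
+# time, e.g.:
+#   modprobe g_mass_storage file=/path/to/backing.img num_buffers=4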
+
+#
+# USB Peripheral Controller Support
+#
+# The order here is alphabetical, except that integrated controllers go
+# before discrete ones so they will be the initial/default value:
+#   - integrated/SOC controllers first
+#   - licensed IP used in both SOC and discrete versions
+#   - discrete ones (including all PCI-only controllers)
+#   - debug/dummy gadget+hcd is last.
+#
+choice
+	prompt "USB Peripheral Controller"
+	help
+	   A USB device uses a controller to talk to its host.
+	   Systems should have only one such upstream link.
+	   Many controller drivers are platform-specific; these
+	   often need board-specific hooks.
+
+#
+# Integrated controllers
+#
+
+config USB_AT91
+	tristate "Atmel AT91 USB Device Port"
+	depends on ARCH_AT91
+	help
+	   Many Atmel AT91 processors (such as the AT91RM2000) have a
+	   full speed USB Device Port with support for five configurable
+	   endpoints (plus endpoint zero).
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "at91_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_ATMEL_USBA
+	tristate "Atmel USBA"
+	select USB_GADGET_DUALSPEED
+	depends on AVR32 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
+	help
+	  USBA is the integrated high-speed USB Device controller on
+	  the AT32AP700x, some AT91SAM9 and AT91CAP9 processors from Atmel.
+
+config USB_FSL_USB2
+	tristate "Freescale Highspeed USB DR Peripheral Controller"
+	depends on FSL_SOC || ARCH_MXC
+	select USB_GADGET_DUALSPEED
+	select USB_FSL_MPH_DR_OF if OF
+	help
+	   Some Freescale PowerPC processors have a High Speed
+	   Dual-Role (DR) USB controller, which supports device mode.
+
+	   The number of programmable endpoints differs between
+	   SoC revisions.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "fsl_usb2_udc" and force
+	   all gadget drivers to also be dynamically linked.
+
+config USB_NONE
+	tristate "USB Peripheral Controller None"
+	help
+	   Select this to build no USB peripheral controller driver.
+
+config USB_FUSB300
+	tristate "Faraday FUSB300 USB Peripheral Controller"
+	depends on !PHYS_ADDR_T_64BIT
+	select USB_GADGET_DUALSPEED
+	help
+	   Faraday USB device controller FUSB300 driver.
+
+config USB_OMAP
+	tristate "OMAP USB Device Controller"
+	depends on ARCH_OMAP
+	select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_H4_OTG
+	select USB_OTG_UTILS if ARCH_OMAP
+	help
+	   Many Texas Instruments OMAP processors have flexible full
+	   speed USB device controllers, with support for up to 30
+	   endpoints (plus endpoint zero).  This driver supports the
+	   controller in the OMAP 1611, and should work with controllers
+	   in other OMAP processors too, given minor tweaks.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "omap_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_PXA25X
+	tristate "PXA 25x or IXP 4xx"
+	depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX
+	select USB_OTG_UTILS
+	help
+	   Intel's PXA 25x series XScale ARM-5TE processors include
+	   an integrated full speed USB 1.1 device controller.  The
+	   controller in the IXP 4xx series is register-compatible.
+
+	   It has fifteen fixed-function endpoints, as well as endpoint
+	   zero (for control transfers).
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "pxa25x_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+# if there's only one gadget driver, using only two bulk endpoints,
+# don't waste memory for the other endpoints
+config USB_PXA25X_SMALL
+	depends on USB_PXA25X
+	bool
+	default n if USB_ETH_RNDIS
+	default y if USB_ZERO
+	default y if USB_ETH
+	default y if USB_G_SERIAL
+
+config USB_R8A66597
+	tristate "Renesas R8A66597 USB Peripheral Controller"
+	select USB_GADGET_DUALSPEED
+	help
+	   R8A66597 is a discrete USB host and peripheral controller chip that
+	   supports both full and high speed USB 2.0 data transfers.
+	   It has nine configurable endpoints, and endpoint zero.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "r8a66597_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_RENESAS_USBHS_UDC
+	tristate 'Renesas USBHS controller'
+	depends on USB_RENESAS_USBHS
+	select USB_GADGET_DUALSPEED
+	help
+	   Renesas USBHS is a discrete USB host and peripheral controller chip
+	   that supports both full and high speed USB 2.0 data transfers.
+	   It has nine or more configurable endpoints, and endpoint zero.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "renesas_usbhs" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_PXA27X
+	tristate "PXA 27x"
+	depends on ARCH_PXA && (PXA27x || PXA3xx)
+	select USB_OTG_UTILS
+	help
+	   Intel's PXA 27x series XScale ARM v5TE processors include
+	   an integrated full speed USB 1.1 device controller.
+
+	   It has up to 23 endpoints, as well as endpoint zero (for
+	   control transfers).
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "pxa27x_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_S3C_HSOTG
+	tristate "S3C HS/OtG USB Device controller"
+	depends on S3C_DEV_USB_HSOTG
+	select USB_GADGET_DUALSPEED
+	help
+	  Driver for the Samsung S3C64XX USB 2.0 high-speed gadget
+	  controller integrated into the S3C64XX series SoCs.
+
+config USB_IMX
+	tristate "Freescale i.MX1 USB Peripheral Controller"
+	depends on ARCH_MXC
+	help
+	   Freescale's i.MX1 includes an integrated full speed
+	   USB 1.1 device controller.
+
+	   It has six fixed-function endpoints, as well as endpoint
+	   zero (for control transfers).
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "imx_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_S3C2410
+	tristate "S3C2410 USB Device Controller"
+	depends on ARCH_S3C24XX
+	help
+	  Samsung's S3C2410 is an ARM920T-based processor with an integrated
+	  full speed USB 1.1 device controller.  It has 4 configurable
+	  endpoints, as well as endpoint zero (for control transfers).
+
+	  This driver has been tested on the S3C2410, S3C2412, and
+	  S3C2440 processors.
+
+config USB_S3C2410_DEBUG
+	boolean "S3C2410 udc debug messages"
+	depends on USB_S3C2410
+
+config USB_S3C_HSUDC
+	tristate "S3C2416, S3C2443 and S3C2450 USB Device Controller"
+	depends on ARCH_S3C24XX
+	select USB_GADGET_DUALSPEED
+	help
+	  Samsung's S3C2416, S3C2443 and S3C2450 are ARM9-based SoCs
+	  with an integrated dual-speed USB 2.0 device controller. It has
+	  8 endpoints, as well as endpoint zero.
+
+	  This driver has been tested on S3C2416 and S3C2450 processors.
+
+config USB_MV_UDC
+	tristate "Marvell USB2.0 Device Controller"
+	select USB_GADGET_DUALSPEED
+	help
+	  Marvell SoCs (including the PXA and MMP series) include a high
+	  speed USB 2.0 OTG controller, which can be configured as a high
+	  speed or full speed USB peripheral.
+
+#
+# Controllers available in both integrated and discrete versions
+#
+
+# musb builds in ../musb along with host support
+config USB_GADGET_MUSB_HDRC
+	tristate "Inventra HDRC USB Peripheral (TI, ADI, ...)"
+	depends on USB_MUSB_HDRC
+	select USB_GADGET_DUALSPEED
+	help
+	  This OTG-capable silicon IP is used in dual designs including
+	  the TI DaVinci, OMAP 243x, OMAP 343x, TUSB 6010, and ADI Blackfin.
+
+config USB_M66592
+	tristate "Renesas M66592 USB Peripheral Controller"
+	select USB_GADGET_DUALSPEED
+	help
+	   M66592 is a discrete USB peripheral controller chip that
+	   supports both full and high speed USB 2.0 data transfers.
+	   It has seven configurable endpoints, and endpoint zero.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "m66592_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+#
+# Controllers available only in discrete form (and all PCI controllers)
+#
+
+config USB_AMD5536UDC
+	tristate "AMD5536 UDC"
+	depends on PCI
+	select USB_GADGET_DUALSPEED
+	help
+	   The AMD5536 UDC is part of the AMD Geode CS5536, an x86 southbridge.
+	   It is a high speed, DMA-capable USB device controller. Besides ep0,
+	   it provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
+	   The UDC port supports OTG operation, and may be used as a host port
+	   if it's not being used to implement peripheral or OTG roles.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "amd5536udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_FSL_QE
+	tristate "Freescale QE/CPM USB Device Controller"
+	depends on FSL_SOC && (QUICC_ENGINE || CPM)
+	help
+	   Some Freescale PowerPC processors have a Full Speed
+	   QE/CPM2 USB controller, which supports device mode with 4
+	   programmable endpoints. This driver supports the
+	   controller in the MPC8360 and MPC8272, and should work with
+	   controllers having QE or CPM2, given minor tweaks.
+
+	   Set CONFIG_USB_GADGET to "m" to build this driver as a
+	   dynamically linked module called "fsl_qe_udc".
+
+config USB_CI13XXX_PCI
+	tristate "MIPS USB CI13xxx PCI UDC"
+	depends on PCI
+	select USB_GADGET_DUALSPEED
+	help
+	  MIPS USB IP core family device controller.
+	  Currently it only supports IP part number CI13412.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "ci13xxx_udc" and force all
+	  gadget drivers to also be dynamically linked.
+
+config USB_NET2272
+	tristate "PLX NET2272"
+	select USB_GADGET_DUALSPEED
+	help
+	  PLX NET2272 is a USB peripheral controller which supports
+	  both full and high speed USB 2.0 data transfers.
+
+	  It has three configurable endpoints, as well as endpoint zero
+	  (for control transfers).
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "net2272" and force all
+	  gadget drivers to also be dynamically linked.
+
+config USB_NET2272_DMA
+	boolean "Support external DMA controller"
+	depends on USB_NET2272
+	help
+	  The NET2272 part can optionally support an external DMA
+	  controller, but your board has to have support in the
+	  driver itself.
+
+	  If unsure, say "N" here.  The driver works fine in PIO mode.
+
+config USB_NET2280
+	tristate "NetChip 228x"
+	depends on PCI
+	select USB_GADGET_DUALSPEED
+	help
+	   NetChip 2280 / 2282 is a PCI based USB peripheral controller which
+	   supports both full and high speed USB 2.0 data transfers.
+
+	   It has six configurable endpoints, as well as endpoint zero
+	   (for control transfers) and several endpoints with dedicated
+	   functions.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "net2280" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_GOKU
+	tristate "Toshiba TC86C001 'Goku-S'"
+	depends on PCI
+	help
+	   The Toshiba TC86C001 is a PCI device which includes controllers
+	   for full speed USB devices, IDE, I2C, SIO, plus a USB host (OHCI).
+
+	   The device controller has three configurable (bulk or interrupt)
+	   endpoints, plus endpoint zero (for control transfers).
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "goku_udc" and to force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_LANGWELL
+	tristate "Intel Langwell USB Device Controller"
+	depends on PCI
+	depends on !PHYS_ADDR_T_64BIT
+	select USB_GADGET_DUALSPEED
+	help
+	   Intel Langwell USB Device Controller is a High-Speed USB
+	   On-The-Go device controller.
+
+	   The number of programmable endpoints differs between
+	   controller revisions.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "langwell_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_EG20T
+	tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
+	depends on PCI
+	select USB_GADGET_DUALSPEED
+	help
+	  This is a USB device driver for the EG20T PCH.
+	  The EG20T PCH is the platform controller hub used in Intel's
+	  general embedded platforms, and it includes a USB device
+	  interface through which the host can access system devices
+	  connected as a USB device.
+	  This driver enables that USB device function.  The device
+	  interface is a USB peripheral controller which supports both
+	  full and high speed USB 2.0 data transfers.
+	  The driver supports control and bulk transfer modes; it does
+	  not support interrupt or isochronous transfer modes.
+
+	  This driver can also be used for LAPIS Semiconductor's ML7213,
+	  which is intended for IVI (In-Vehicle Infotainment) use, and the
+	  ML7831, which is for general purpose use.
+	  ML7213/ML7831 are companion chips for the Intel Atom E6xx series
+	  and are fully compatible with the Intel EG20T PCH.
+
+config USB_CI13XXX_MSM
+	tristate "MIPS USB CI13xxx for MSM"
+	depends on ARCH_MSM
+	select USB_GADGET_DUALSPEED
+	select USB_MSM_OTG
+	help
+	  MSM SoCs have a ChipIdea USB controller.  This driver uses
+	  the ci13xxx_udc core.
+	  It depends on the OTG driver for PHY initialization,
+	  clock management, powering up VBUS, and power management.
+	  This driver is not supported on boards like trout, which
+	  have an external PHY.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "ci13xxx_msm" and force all
+	  gadget drivers to also be dynamically linked.
+
+#
+# LAST -- dummy/emulated controller
+#
+
+config USB_DUMMY_HCD
+	tristate "Dummy HCD (DEVELOPMENT)"
+	depends on USB=y || (USB=m && USB_GADGET=m)
+	select USB_GADGET_DUALSPEED
+	select USB_GADGET_SUPERSPEED
+	help
+	  This host controller driver emulates USB, looping all data transfer
+	  requests back to a USB "gadget driver" in the same host.  The host
+	  side is the master; the gadget side is the slave.  Gadget drivers
+	  can be high, full, or low speed; and they have access to endpoints
+	  like those from NET2280, PXA2xx, or SA1100 hardware.
+
+	  This may help in some stages of creating a driver to embed in a
+	  Linux device, since it lets you debug several parts of the gadget
+	  driver without its hardware or drivers being involved.
+
+	  Since such a gadget side driver needs to interoperate with a host
+	  side Linux-USB device driver, this may help to debug both sides
+	  of a USB protocol stack.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "dummy_hcd" and force all
+	  gadget drivers to also be dynamically linked.
+
+# NOTE:  Please keep dummy_hcd LAST so that "real hardware" appears
+# first and will be selected by default.
+
+endchoice
+
+# Selected by UDC drivers that support high-speed operation.
+config USB_GADGET_DUALSPEED
+	bool
+
+# Selected by UDC drivers that support super-speed operation.
+config USB_GADGET_SUPERSPEED
+	bool
+	depends on USB_GADGET_DUALSPEED
+
+#
+# USB Gadget Drivers
+#
+choice
+	tristate "USB Gadget Drivers"
+	default USB_GADGET_NONE
+	help
+	  A Linux "Gadget Driver" talks to the USB Peripheral Controller
+	  driver through the abstract "gadget" API.  Some other operating
+	  systems call these "client" drivers, of which "class drivers"
+	  are a subset (implementing a USB device class specification).
+	  A gadget driver implements one or more USB functions using
+	  the peripheral hardware.
+
+	  Gadget drivers are hardware-neutral, or "platform independent",
+	  except that they sometimes must understand quirks or limitations
+	  of the particular controllers they work with.  For example, when
+	  a controller doesn't support alternate configurations or provide
+	  enough of the right types of endpoints, the gadget driver might
+	  not be able to work with that controller, or might need to implement
+	  a less common variant of a device class protocol.
+
+# this first set of drivers all depend on bulk-capable hardware.
+
+config USB_GADGET_NONE
+	tristate "USB Gadget None"
+	help
+	  Select this to build no USB gadget driver.
+
+config USB_ZERO
+	tristate "Gadget Zero (DEVELOPMENT)"
+	help
+	  Gadget Zero is a two-configuration device.  It either sinks and
+	  sources bulk data; or it loops back a configurable number of
+	  transfers.  It also implements control requests, for "chapter 9"
+	  conformance.  The driver needs only two bulk-capable endpoints, so
+	  it can work on top of most device-side usb controllers.  It's
+	  useful for testing, and is also a working example showing how
+	  USB "gadget drivers" can be written.
+
+	  Make this be the first driver you try using on top of any new
+	  USB peripheral controller driver.  Then you can use host-side
+	  test software, like the "usbtest" driver, to put your hardware
+	  and its driver through a basic set of functional tests.
+
+	  Gadget Zero also works with the host-side "usb-skeleton" driver,
+	  and with many kinds of host-side test software.  You may need
+	  to tweak product and vendor IDs before host software knows about
+	  this device, and arrange to select an appropriate configuration.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_zero".
+
+config USB_ZERO_HNPTEST
+	boolean "HNP Test Device"
+	depends on USB_ZERO && USB_OTG
+	help
+	  You can configure this device to enumerate using the device
+	  identifiers of the USB-OTG test device.  That means that when
+	  this gadget connects to another OTG device, with this one using
+	  the "B-Peripheral" role, that device will use HNP to let this
+	  one serve as the USB host instead (in the "B-Host" role).
+
+config USB_AUDIO
+	tristate "Audio Gadget (EXPERIMENTAL)"
+	depends on SND
+	select SND_PCM
+	help
+	  This Gadget Audio driver is compatible with USB Audio Class
+	  specification 2.0. It implements one AudioControl interface and
+	  one AudioStreaming interface each for USB-OUT and USB-IN.
+	  The number of channels, sample rate and sample size can be
+	  specified as module parameters.
+	  This driver doesn't expect any real audio codec to be present
+	  on the device - the audio streams are simply sunk into and
+	  sourced from a virtual ALSA sound card that the driver creates.
+	  The user-space application may do whatever it wants with the
+	  data received from the USB host and may provide whatever it
+	  wants as audio data to the USB host.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_audio".
+
+config GADGET_UAC1
+	bool "UAC 1.0 (Legacy)"
+	depends on USB_AUDIO
+	help
+	  Select this if you instead want the older UAC Spec-1.0 driver,
+	  which has its audio paths hardwired to an on-board audio codec
+	  chip and does not work without one.
+
+config USB_ETH
+	tristate "Ethernet Gadget (with CDC Ethernet support)"
+	depends on NET
+	select CRC32
+	help
+	  This driver implements Ethernet style communication, in one of
+	  several ways:
+	  
+	   - The "Communication Device Class" (CDC) Ethernet Control Model.
+	     That protocol is often avoided with pure Ethernet adapters, in
+	     favor of simpler vendor-specific hardware, but is widely
+	     supported by firmware for smart network devices.
+
+	   - On hardware that can't implement that protocol, a simple CDC
+	     subset is used, placing fewer demands on USB.
+
+	   - CDC Ethernet Emulation Model (EEM) is a newer standard that has
+	     a simpler interface that can be used by more USB hardware.
+
+	  RNDIS support is an additional option, more demanding than that
+	  subset.
+
+	  Within the USB device, this gadget driver exposes a network device
+	  "usbX", where X depends on what other networking devices you have.
+	  Treat it like a two-node Ethernet link:  host, and gadget.
+
+	  The Linux-USB host-side "usbnet" driver interoperates with this
+	  driver, so that deep I/O queues can be supported.  On 2.4 kernels,
+	  use "CDCEther" instead, if you're using the CDC option. That CDC
+	  mode should also interoperate with standard CDC Ethernet class
+	  drivers on other host operating systems.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_ether".
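+
+# Illustrative example (addresses are arbitrary): after loading g_ether the gadget
+# exposes a usbX network interface that is configured like any Ethernet link, e.g.
+# on the gadget side:
+#   ip addr add 192.168.7.2/24 dev usb0
+# and on a Linux host using the "usbnet" driver:
+#   ip addr add 192.168.7.1/24 dev usb0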
+
+config USB_ETH_RNDIS
+	bool "RNDIS support"
+	depends on USB_ETH
+	default y
+	help
+	   Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol,
+	   and Microsoft provides redistributable binary RNDIS drivers for
+	   older versions of Windows.
+
+	   If you say "y" here, the Ethernet gadget driver will try to provide
+	   a second device configuration, supporting RNDIS to talk to such
+	   Microsoft USB hosts.
+	   
+	   To make MS-Windows work with this, use Documentation/usb/linux.inf
+	   as the "driver info file".  For versions of MS-Windows older than
+	   XP, you'll need to download drivers from Microsoft's website; a URL
+	   is given in comments found in that info file.
+
+config USB_ETH_EEM
+       bool "Ethernet Emulation Model (EEM) support"
+       depends on USB_ETH
+       default n
+       help
+         CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
+         and therefore can be supported by more hardware.  Technically ECM and
+         EEM are designed for different applications.  The ECM model extends
+         the network interface to the target (e.g. a USB cable modem), and the
+         EEM model is for mobile devices to communicate with hosts using
+         ethernet over USB.  For Linux gadgets, however, the interface with
+         the host is the same (a usbX device), so the differences are minimal.
+
+         If you say "y" here, the Ethernet gadget driver will use the EEM
+         protocol rather than ECM.  If unsure, say "n".
+
+config USB_G_NCM
+	tristate "Network Control Model (NCM) support"
+	depends on NET
+	select CRC32
+	help
+	  This driver implements the USB CDC NCM subclass standard. NCM is
+	  an advanced protocol for Ethernet encapsulation that allows
+	  grouping several Ethernet frames into one USB transfer and
+	  supports different alignment possibilities.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_ncm".
+
+config USB_GADGETFS
+	tristate "Gadget Filesystem (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  This driver provides a filesystem based API that lets user mode
+	  programs implement a single-configuration USB device, including
+	  endpoint I/O and control requests that don't relate to enumeration.
+	  All endpoints, transfer speeds, and transfer types supported by
+	  the hardware are available, through read() and write() calls.
+
+	  Currently, this option is still labelled as EXPERIMENTAL because
+	  of existing race conditions in the underlying in-kernel AIO core.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "gadgetfs".
+
+config USB_FUNCTIONFS
+	tristate "Function Filesystem (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	select USB_FUNCTIONFS_GENERIC if !(USB_FUNCTIONFS_ETH || USB_FUNCTIONFS_RNDIS)
+	help
+	  The Function Filesystem (FunctionFS) lets one create USB
+	  composite functions in user space in the same way GadgetFS
+	  lets one create USB gadgets in user space.  This allows creation
+	  of composite gadgets such that some of the functions are
+	  implemented in kernel space (for instance Ethernet, serial or
+	  mass storage) and other are implemented in user space.
+
+	  If you say "y" or "m" here you will be able to choose what kind
+	  of configurations the gadget will provide.
+
+	  Say "y" to link the driver statically, or "m" to build
+	  a dynamically linked module called "g_ffs".
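+
+# Illustrative example (mount point and name are arbitrary): a user-space function
+# is brought up by mounting FunctionFS and writing its descriptors and strings to
+# the ep0 file before the gadget is activated, e.g.:
+#   mkdir -p /dev/usb-ffs/myfunc
+#   mount -t functionfs myfunc /dev/usb-ffs/myfunc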
+
+config USB_FUNCTIONFS_ETH
+	bool "Include configuration with CDC ECM (Ethernet)"
+	depends on USB_FUNCTIONFS && NET
+	help
+	  Include a configuration with CDC ECM function (Ethernet) and the
+	  Function Filesystem.
+
+config USB_FUNCTIONFS_RNDIS
+	bool "Include configuration with RNDIS (Ethernet)"
+	depends on USB_FUNCTIONFS && NET
+	help
+	  Include a configuration with the RNDIS function (Ethernet) and
+	  the Function Filesystem.
+
+config USB_FUNCTIONFS_GENERIC
+	bool "Include 'pure' configuration"
+	depends on USB_FUNCTIONFS
+	help
+	  Include a configuration with the Function Filesystem alone with
+	  no Ethernet interface.
+
+config USB_FILE_STORAGE
+	tristate "File-backed Storage Gadget (DEPRECATED)"
+	depends on BLOCK
+	help
+	  The File-backed Storage Gadget acts as a USB Mass Storage
+	  disk drive.  As its storage repository it can use a regular
+	  file or a block device (in much the same way as the "loop"
+	  device driver), specified as a module parameter.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_file_storage".
+
+	  NOTE: This driver is deprecated.  Its replacement is the
+	  Mass Storage Gadget.
+
+config USB_FILE_STORAGE_TEST
+	bool "File-backed Storage Gadget testing version"
+	depends on USB_FILE_STORAGE
+	default n
+	help
+	  Say "y" to generate the larger testing version of the
+	  File-backed Storage Gadget, useful for probing the
+	  behavior of USB Mass Storage hosts.  Not needed for
+	  normal operation.
+
+config USB_MASS_STORAGE
+	tristate "Mass Storage Gadget"
+	depends on BLOCK
+	help
+	  The Mass Storage Gadget acts as a USB Mass Storage disk drive.
+	  As its storage repository it can use a regular file or a block
+	  device (in much the same way as the "loop" device driver),
+	  specified as a module parameter or sysfs option.
+
+	  This driver is an updated replacement for the deprecated
+	  File-backed Storage Gadget (g_file_storage).
+
+	  Say "y" to link the driver statically, or "m" to build
+	  a dynamically linked module called "g_mass_storage".
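+
+# Illustrative example (backing file path is arbitrary): expose a disk image as a
+# removable USB drive:
+#   modprobe g_mass_storage file=/path/to/disk.img removable=1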
+
+config USB_G_SERIAL
+	tristate "Serial Gadget (with CDC ACM and CDC OBEX support)"
+	help
+	  The Serial Gadget talks to the Linux-USB generic serial driver.
+	  This driver supports a CDC-ACM module option, which can be used
+	  to interoperate with MS-Windows hosts or with the Linux-USB
+	  "cdc-acm" driver.
+
+	  This driver also supports a CDC-OBEX option.  You will need a
+	  user space OBEX server talking to /dev/ttyGS*, since the kernel
+	  itself doesn't implement the OBEX protocol.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_serial".
+
+	  For more information, see Documentation/usb/gadget_serial.txt
+	  which includes instructions and a "driver info file" needed to
+	  make MS-Windows work with CDC ACM.
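+
+# Illustrative example: with g_serial loaded, the gadget side can run a login
+# shell on the CDC ACM port, e.g.:
+#   /sbin/getty -L ttyGS0 115200 vt100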
+
+config USB_MIDI_GADGET
+	tristate "MIDI Gadget (EXPERIMENTAL)"
+	depends on SND && EXPERIMENTAL
+	select SND_RAWMIDI
+	help
+	  The MIDI Gadget acts as a USB Audio device, with one MIDI
+	  input and one MIDI output. These MIDI jacks appear as
+	  a sound "card" in the ALSA sound system. Other MIDI
+	  connections can then be made on the gadget system, using
+	  ALSA's aconnect utility etc.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_midi".
+
+config USB_G_PRINTER
+	tristate "Printer Gadget"
+	help
+	  The Printer Gadget channels data between the USB host and a
+	  userspace program driving the print engine. The user space
+	  program reads and writes the device file /dev/g_printer to
+	  receive or send printer data. It can use ioctl calls to
+	  the device file to get or set printer status.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_printer".
+
+	  For more information, see Documentation/usb/gadget_printer.txt
+	  which includes sample code for accessing the device file.
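+
+# Illustrative example: a minimal user-space "print engine" can capture incoming
+# print jobs simply by reading the device file, e.g.:
+#   cat /dev/g_printer > /tmp/print-job.prn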
+
+config USB_G_ANDROID
+	boolean "Android Composite Gadget"
+	help
+	  The Android Composite Gadget supports multiple USB
+	  functions: adb, acm, mass storage, mtp, accessory
+	  and rndis.
+	  Each function can be configured and enabled/disabled
+	  dynamically from userspace through a sysfs interface.
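+
+# Illustrative example (sysfs paths assumed from the android gadget convention,
+# not taken from this file): functions are typically selected while the gadget
+# is disabled, e.g.:
+#   echo 0 > /sys/class/android_usb/android0/enable
+#   echo mass_storage,adb > /sys/class/android_usb/android0/functions
+#   echo 1 > /sys/class/android_usb/android0/enable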
+
+config USB_CDC_COMPOSITE
+	tristate "CDC Composite Device (Ethernet and ACM)"
+	depends on NET
+	help
+	  This driver provides two functions in one configuration:
+	  a CDC Ethernet (ECM) link, and a CDC ACM (serial port) link.
+
+	  This driver requires four bulk and two interrupt endpoints,
+	  plus the ability to handle altsettings.  Not all peripheral
+	  controllers are that capable.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module.
+
+config USB_G_NOKIA
+	tristate "Nokia composite gadget"
+	depends on PHONET
+	help
+	  The Nokia composite gadget provides support for acm, obex
+	  and phonet in only one composite gadget driver.
+
+	  It's only really useful for N900 hardware. If you're building
+	  a kernel for N900, say Y or M here. If unsure, say N.
+
+config USB_G_ACM_MS
+	tristate "CDC Composite Device (ACM and mass storage)"
+	depends on BLOCK
+	help
+	  This driver provides two functions in one configuration:
+	  a mass storage function and a CDC ACM (serial port) link.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_acm_ms".
+
+config USB_G_MULTI
+	tristate "Multifunction Composite Gadget (EXPERIMENTAL)"
+	depends on BLOCK && NET
+	select USB_G_MULTI_CDC if !USB_G_MULTI_RNDIS
+	help
+	  The Multifunction Composite Gadget provides Ethernet (RNDIS
+	  and/or CDC Ethernet), mass storage and ACM serial link
+	  interfaces.
+
+	  You will be asked to choose which of the two configurations is
+	  to be available in the gadget.  At least one configuration must
+	  be chosen to make the gadget usable.  Selecting more than one
+	  configuration will prevent Windows from automatically detecting
+	  the gadget as a composite gadget, so an INF file will be needed to
+	  use the gadget.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_multi".
+
+config USB_G_MULTI_RNDIS
+	bool "RNDIS + CDC Serial + Storage configuration"
+	depends on USB_G_MULTI
+	default y
+	help
+	  This option enables a configuration with RNDIS, CDC Serial and
+	  Mass Storage functions available in the Multifunction Composite
+	  Gadget.  This is the configuration intended for Windows, since RNDIS
+	  is Microsoft's protocol.
+
+	  If unsure, say "y".
+
+config USB_G_MULTI_CDC
+	bool "CDC Ethernet + CDC Serial + Storage configuration"
+	depends on USB_G_MULTI
+	default n
+	help
+	  This option enables a configuration with CDC Ethernet (ECM), CDC
+	  Serial and Mass Storage functions available in the Multifunction
+	  Composite Gadget.
+
+	  If unsure, say "y".
+
+config USB_G_HID
+	tristate "HID Gadget"
+	help
+	  The HID gadget driver provides generic emulation of USB
+	  Human Interface Devices (HID).
+
+	  For more information, see Documentation/usb/gadget_hid.txt which
+	  includes sample code for accessing the device files.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_hid".
+
+config USB_G_DBGP
+	tristate "EHCI Debug Device Gadget"
+	help
+	  This gadget emulates an EHCI Debug device. This is useful when you want
+	  to interact with an EHCI Debug Port.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_dbgp".
+
+if USB_G_DBGP
+choice
+	prompt "EHCI Debug Device mode"
+	default USB_G_DBGP_SERIAL
+
+config USB_G_DBGP_PRINTK
+	depends on USB_G_DBGP
+	bool "printk"
+	help
+	  Directly printk() received data. No interaction.
+
+config USB_G_DBGP_SERIAL
+	depends on USB_G_DBGP
+	bool "serial"
+	help
+	  Userland can interact using /dev/ttyGSxxx.
+endchoice
+endif
+
+# put drivers that need isochronous transfer support (for audio
+# or video class gadget drivers), or specific hardware, here.
+config USB_G_WEBCAM
+	tristate "USB Webcam Gadget"
+	depends on VIDEO_DEV
+	help
+	  The Webcam Gadget acts as a composite USB Audio and Video Class
+	  device. It provides a userspace API to process UVC control requests
+	  and stream video data to the host.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_webcam".
+
+endchoice
+
+endif # USB_GADGET
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/Makefile b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/Makefile
new file mode 100644
index 0000000..caab056
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/Makefile
@@ -0,0 +1,79 @@
+#
+# USB peripheral controller drivers
+#
+ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG
+
+obj-$(CONFIG_USB_GADGET)	+= udc-core.o
+obj-$(CONFIG_USB_DUMMY_HCD)	+= dummy_hcd.o
+obj-$(CONFIG_USB_NET2272)	+= net2272.o
+obj-$(CONFIG_USB_NET2280)	+= net2280.o
+obj-$(CONFIG_USB_AMD5536UDC)	+= amd5536udc.o
+obj-$(CONFIG_USB_PXA25X)	+= pxa25x_udc.o
+obj-$(CONFIG_USB_PXA27X)	+= pxa27x_udc.o
+obj-$(CONFIG_USB_IMX)		+= imx_udc.o
+obj-$(CONFIG_USB_GOKU)		+= goku_udc.o
+obj-$(CONFIG_USB_OMAP)		+= omap_udc.o
+obj-$(CONFIG_USB_S3C2410)	+= s3c2410_udc.o
+obj-$(CONFIG_USB_AT91)		+= at91_udc.o
+obj-$(CONFIG_USB_ATMEL_USBA)	+= atmel_usba_udc.o
+obj-$(CONFIG_USB_FSL_USB2)	+= fsl_usb2_udc.o
+fsl_usb2_udc-y			:= fsl_udc_core.o
+fsl_usb2_udc-$(CONFIG_ARCH_MXC)	+= fsl_mxc_udc.o
+obj-$(CONFIG_USB_M66592)	+= m66592-udc.o
+obj-$(CONFIG_USB_R8A66597)	+= r8a66597-udc.o
+obj-$(CONFIG_USB_FSL_QE)	+= fsl_qe_udc.o
+obj-$(CONFIG_USB_CI13XXX_PCI)	+= ci13xxx_pci.o
+obj-$(CONFIG_USB_S3C_HSOTG)	+= s3c-hsotg.o
+obj-$(CONFIG_USB_S3C_HSUDC)	+= s3c-hsudc.o
+obj-$(CONFIG_USB_LANGWELL)	+= langwell_udc.o
+obj-$(CONFIG_USB_EG20T)		+= pch_udc.o
+obj-$(CONFIG_USB_MV_UDC)	+= mv_udc.o
+mv_udc-y			:= mv_udc_core.o
+obj-$(CONFIG_USB_CI13XXX_MSM)	+= ci13xxx_msm.o
+obj-$(CONFIG_USB_FUSB300)	+= fusb300_udc.o
+
+#
+# USB gadget drivers
+#
+g_zero-y			:= zero.o
+g_audio-y			:= audio.o
+g_ether-y			:= ether.o
+g_serial-y			:= serial.o
+g_midi-y			:= gmidi.o
+gadgetfs-y			:= inode.o
+g_file_storage-y		:= file_storage.o
+g_mass_storage-y		:= mass_storage.o
+g_printer-y			:= printer.o
+g_cdc-y				:= cdc2.o
+g_multi-y			:= multi.o
+g_hid-y				:= hid.o
+g_dbgp-y			:= dbgp.o
+g_nokia-y			:= nokia.o
+g_webcam-y			:= webcam.o
+g_ncm-y				:= ncm.o
+g_acm_ms-y			:= acm_ms.o
+
+g_android-y			:= android.o  multi_packet.o
+
+obj-$(CONFIG_USB_ZERO)		+= g_zero.o
+obj-$(CONFIG_USB_AUDIO)		+= g_audio.o
+obj-$(CONFIG_USB_ETH)		+= g_ether.o
+obj-$(CONFIG_USB_GADGETFS)	+= gadgetfs.o
+obj-$(CONFIG_USB_FUNCTIONFS)	+= g_ffs.o
+obj-$(CONFIG_USB_FILE_STORAGE)	+= g_file_storage.o
+obj-$(CONFIG_USB_MASS_STORAGE)	+= g_mass_storage.o
+obj-$(CONFIG_USB_G_SERIAL)	+= g_serial.o
+obj-$(CONFIG_USB_G_PRINTER)	+= g_printer.o
+obj-$(CONFIG_USB_MIDI_GADGET)	+= g_midi.o
+obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
+obj-$(CONFIG_USB_G_HID)		+= g_hid.o
+obj-$(CONFIG_USB_G_DBGP)	+= g_dbgp.o
+obj-$(CONFIG_USB_G_MULTI)	+= g_multi.o
+obj-$(CONFIG_USB_G_NOKIA)	+= g_nokia.o
+obj-$(CONFIG_USB_G_WEBCAM)	+= g_webcam.o
+obj-$(CONFIG_USB_G_NCM)		+= g_ncm.o
+obj-$(CONFIG_USB_G_ACM_MS)	+= g_acm_ms.o
+#obj-$(CONFIG_USB_G_ANDROID)	+= g_android.o
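+# NOTE: g_android is linked unconditionally below instead of being gated on
+# CONFIG_USB_G_ANDROID.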
+obj-y	+= g_android.o
+#obj-m	+= dw2_udc.o
+obj-y += usb_netlink.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/acm_ms.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/acm_ms.c
new file mode 100644
index 0000000..fdb7aec
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/acm_ms.c
@@ -0,0 +1,256 @@
+/*
+ * acm_ms.c -- Composite driver, with ACM and mass storage support
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: David Brownell
+ * Modified: Klaus Schwarzkopf <schwarzkopf@sensortherm.de>
+ *
+ * Heavily based on multi.c and cdc2.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+
+#include "u_serial.h"
+
+#define DRIVER_DESC		"Composite Gadget (ACM + MS)"
+#define DRIVER_VERSION		"2011/10/10"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+#define ACM_MS_VENDOR_NUM	0x1d6b	/* Linux Foundation */
+#define ACM_MS_PRODUCT_NUM	0x0106	/* Composite Gadget: ACM + MS */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "u_serial.c"
+#include "f_acm.c"
+#include "f_mass_storage.c"
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		cpu_to_le16(0x0200),
+
+	.bDeviceClass =		USB_CLASS_MISC /* 0xEF */,
+	.bDeviceSubClass =	2,
+	.bDeviceProtocol =	1,
+
+	/* .bMaxPacketSize0 = f(hardware) */
+
+	/* Vendor and product id can be overridden by module parameters.  */
+	.idVendor =		cpu_to_le16(ACM_MS_VENDOR_NUM),
+	.idProduct =		cpu_to_le16(ACM_MS_PRODUCT_NUM),
+	/* .bcdDevice = f(hardware) */
+	/* .iManufacturer = DYNAMIC */
+	/* .iProduct = DYNAMIC */
+	/* NO SERIAL NUMBER */
+	/*.bNumConfigurations =	DYNAMIC*/
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/*
+	 * REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer,
+	[STRING_PRODUCT_IDX].s = DRIVER_DESC,
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+/****************************** Configurations ******************************/
+
+static struct fsg_module_parameters fsg_mod_data = { .stall = 1 };
+FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
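+
+/*
+ * Illustrative usage (backing file path is arbitrary): the mass-storage
+ * parameters declared above are taken directly on the module command line,
+ * e.g. "modprobe g_acm_ms file=/path/to/disk.img stall=1".
+ */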
+
+static struct fsg_common fsg_common;
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * We _always_ have both ACM and mass storage functions.
+ */
+static int __init acm_ms_do_config(struct usb_configuration *c)
+{
+	int	status;
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+
+	status = acm_bind_config(c, 0);
+	if (status < 0)
+		return status;
+
+	status = fsg_bind_config(c->cdev, c, &fsg_common);
+	if (status < 0)
+		return status;
+
+	return 0;
+}
+
+static struct usb_configuration acm_ms_config_driver = {
+	.label			= DRIVER_DESC,
+	.bConfigurationValue	= 1,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init acm_ms_bind(struct usb_composite_dev *cdev)
+{
+	int			gcnum;
+	struct usb_gadget	*gadget = cdev->gadget;
+	int			status;
+	void			*retp;
+
+	/* set up serial link layer */
+	status = gserial_setup(cdev->gadget, 1);
+	if (status < 0)
+		return status;
+
+	/* set up mass storage function */
+	retp = fsg_common_from_params(&fsg_common, cdev, &fsg_mod_data);
+	if (IS_ERR(retp)) {
+		status = PTR_ERR(retp);
+		goto fail0;
+	}
+
+	/* set bcdDevice */
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0) {
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+	} else {
+		WARNING(cdev, "controller '%s' not recognized; trying %s\n",
+				gadget->name,
+				acm_ms_config_driver.label);
+		device_desc.bcdDevice =
+			cpu_to_le16(0x0300 | 0x0099);
+	}
+
+	/*
+	 * Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+
+	/* device descriptor strings: manufacturer, product */
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+		init_utsname()->sysname, init_utsname()->release,
+		gadget->name);
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail1;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail1;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	device_desc.iProduct = status;
+
+	/* register our configuration */
+	status = usb_add_config(cdev, &acm_ms_config_driver, acm_ms_do_config);
+	if (status < 0)
+		goto fail1;
+
+	dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
+			DRIVER_DESC);
+	fsg_common_put(&fsg_common);
+	return 0;
+
+	/* error recovery */
+fail1:
+	fsg_common_put(&fsg_common);
+fail0:
+	gserial_cleanup();
+	return status;
+}
+
+static int __exit acm_ms_unbind(struct usb_composite_dev *cdev)
+{
+	gserial_cleanup();
+
+	return 0;
+}
+
+static struct usb_composite_driver acm_ms_driver = {
+	.name		= "g_acm_ms",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.unbind		= __exit_p(acm_ms_unbind),
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Klaus Schwarzkopf <schwarzkopf@sensortherm.de>");
+MODULE_LICENSE("GPL v2");
+
+static int __init init(void)
+{
+	return usb_composite_probe(&acm_ms_driver, acm_ms_bind);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&acm_ms_driver);
+}
+module_exit(cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/amd5536udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/amd5536udc.c
new file mode 100644
index 0000000..7777927
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/amd5536udc.c
@@ -0,0 +1,3422 @@
+/*
+ * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
+ *
+ * Copyright (C) 2005-2007 AMD (http://www.amd.com)
+ * Author: Thomas Dahlmann
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*
+ * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
+ * It is a high speed, DMA-capable USB device controller. Besides ep0 it
+ * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
+ *
+ * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
+ * be used as host port) and UOC bits PAD_EN and APU are set (should be done
+ * by BIOS init).
+ *
+ * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
+ * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
+ * can be used with gadget ether.
+ */
+
+/* debug control */
+/* #define UDC_VERBOSE */
+
+/* Driver strings */
+#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
+#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
+
+/* system */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/dmapool.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/prefetch.h>
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+/* gadget stack */
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+/* udc specific */
+#include "amd5536udc.h"
+
+
+static void udc_tasklet_disconnect(unsigned long);
+static void empty_req_queue(struct udc_ep *);
+static int udc_probe(struct udc *dev);
+static void udc_basic_init(struct udc *dev);
+static void udc_setup_endpoints(struct udc *dev);
+static void udc_soft_reset(struct udc *dev);
+static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
+static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
+static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
+static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
+				unsigned long buf_len, gfp_t gfp_flags);
+static int udc_remote_wakeup(struct udc *dev);
+static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void udc_pci_remove(struct pci_dev *pdev);
+
+/* description */
+static const char mod_desc[] = UDC_MOD_DESCRIPTION;
+static const char name[] = "amd5536udc";
+
+/* structure to hold endpoint function pointers */
+static const struct usb_ep_ops udc_ep_ops;
+
+/* received setup data */
+static union udc_setup_data setup_data;
+
+/* pointer to device object */
+static struct udc *udc;
+
+/* irq spin lock for soft reset */
+static DEFINE_SPINLOCK(udc_irq_spinlock);
+/* stall spin lock */
+static DEFINE_SPINLOCK(udc_stall_spinlock);
+
+/*
+ * slave mode: pending bytes in rx fifo after nyet,
+ * used if EPIN irq came but no req was available
+ */
+static unsigned int udc_rxfifo_pending;
+
+/* count soft resets after suspend to avoid loop */
+static int soft_reset_occured;
+static int soft_reset_after_usbreset_occured;
+
+/* timer */
+static struct timer_list udc_timer;
+static int stop_timer;
+
+/* set_rde -- Is used to control enabling of RX DMA. Problem is
+ * that UDC has only one bit (RDE) to enable/disable RX DMA for
+ * all OUT endpoints. So we have to handle race conditions like
+ * when OUT data reaches the fifo but no request was queued yet.
+ * This cannot be solved by leaving RX DMA disabled until a
+ * request gets queued, because there may be other OUT packets
+ * in the FIFO (important for not blocking control traffic).
+ * The value of set_rde controls the corresponding timer.
+ *
+ * set_rde -1 == not used, means it is allowed to be set to 0 or 1
+ * set_rde  0 == do not touch RDE, do not start the RDE timer
+ * set_rde  1 == timer function will look whether FIFO has data
+ * set_rde  2 == set by timer function to enable RX DMA on next call
+ */
+static int set_rde = -1;
+
+static DECLARE_COMPLETION(on_exit);
+static struct timer_list udc_pollstall_timer;
+static int stop_pollstall_timer;
+static DECLARE_COMPLETION(on_pollstall_exit);
+
+/* tasklet for usb disconnect */
+static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
+		(unsigned long) &udc);
+
+
+/* endpoint names used for print */
+static const char ep0_string[] = "ep0in";
+static const char *const ep_string[] = {
+	ep0_string,
+	"ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
+	"ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
+	"ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
+	"ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
+	"ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
+	"ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
+	"ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
+};
+
+/* DMA usage flag */
+static bool use_dma = 1;
+/* packet per buffer dma */
+static bool use_dma_ppb = 1;
+/* with per descr. update */
+static bool use_dma_ppb_du;
+/* buffer fill mode */
+static int use_dma_bufferfill_mode;
+/* full speed only mode */
+static bool use_fullspeed;
+/* tx buffer size for high speed */
+static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
+
+/* module parameters */
+module_param(use_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(use_dma, "true for DMA");
+module_param(use_dma_ppb, bool, S_IRUGO);
+MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
+module_param(use_dma_ppb_du, bool, S_IRUGO);
+MODULE_PARM_DESC(use_dma_ppb_du,
+	"true for DMA in packet per buffer mode with descriptor update");
+module_param(use_fullspeed, bool, S_IRUGO);
+MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
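+
+/*
+ * Illustrative usage: "modprobe amd5536udc use_dma=0" selects PIO mode (see the
+ * note about gadget ether and buffer alignment above), while the default uses
+ * packet-per-buffer DMA.
+ */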
+
+/*---------------------------------------------------------------------------*/
+/* Prints UDC device registers and endpoint irq registers */
+static void print_regs(struct udc *dev)
+{
+	DBG(dev, "------- Device registers -------\n");
+	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
+	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
+	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
+	DBG(dev, "\n");
+	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
+	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
+	DBG(dev, "\n");
+	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
+	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
+	DBG(dev, "\n");
+	DBG(dev, "USE DMA        = %d\n", use_dma);
+	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
+		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
+			"WITHOUT desc. update)\n");
+		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
+	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
+		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
+			"WITH desc. update)\n");
+		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
+	}
+	if (use_dma && use_dma_bufferfill_mode) {
+		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
+		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
+	}
+	if (!use_dma)
+		dev_info(&dev->pdev->dev, "FIFO mode\n");
+	DBG(dev, "-------------------------------------------------------\n");
+}
+
+/* Masks unused interrupts */
+static int udc_mask_unused_interrupts(struct udc *dev)
+{
+	u32 tmp;
+
+	/* mask all dev interrupts */
+	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
+		AMD_BIT(UDC_DEVINT_ENUM) |
+		AMD_BIT(UDC_DEVINT_US) |
+		AMD_BIT(UDC_DEVINT_UR) |
+		AMD_BIT(UDC_DEVINT_ES) |
+		AMD_BIT(UDC_DEVINT_SI) |
+		AMD_BIT(UDC_DEVINT_SOF)|
+		AMD_BIT(UDC_DEVINT_SC);
+	writel(tmp, &dev->regs->irqmsk);
+
+	/* mask all ep interrupts */
+	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);
+
+	return 0;
+}
+
+/* Enables endpoint 0 interrupts */
+static int udc_enable_ep0_interrupts(struct udc *dev)
+{
+	u32 tmp;
+
+	DBG(dev, "udc_enable_ep0_interrupts()\n");
+
+	/* read irq mask */
+	tmp = readl(&dev->regs->ep_irqmsk);
+	/* enable ep0 irq's */
+	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
+		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
+	writel(tmp, &dev->regs->ep_irqmsk);
+
+	return 0;
+}
+
+/* Enables device interrupts for SET_INTF and SET_CONFIG */
+static int udc_enable_dev_setup_interrupts(struct udc *dev)
+{
+	u32 tmp;
+
+	DBG(dev, "enable device interrupts for setup data\n");
+
+	/* read irq mask */
+	tmp = readl(&dev->regs->irqmsk);
+
+	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
+	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
+		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
+		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
+		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
+		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
+	writel(tmp, &dev->regs->irqmsk);
+
+	return 0;
+}
+
+/* Calculates fifo start of endpoint based on preceding endpoints */
+static int udc_set_txfifo_addr(struct udc_ep *ep)
+{
+	struct udc	*dev;
+	u32 tmp;
+	int i;
+
+	if (!ep || !(ep->in))
+		return -EINVAL;
+
+	dev = ep->dev;
+	ep->txfifo = dev->txfifo;
+
+	/* traverse ep's */
+	for (i = 0; i < ep->num; i++) {
+		if (dev->ep[i].regs) {
+			/* read fifo size */
+			tmp = readl(&dev->ep[i].regs->bufin_framenum);
+			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
+			ep->txfifo += tmp;
+		}
+	}
+	return 0;
+}
+
+/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
+static u32 cnak_pending;
+
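+/*
+ * Checks whether a previously written CNAK actually cleared the NAK bit;
+ * if not, the endpoint is remembered in cnak_pending and CNAK is retried
+ * later from udc_process_cnak_queue().
+ */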
+static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
+{
+	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
+		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
+		cnak_pending |= 1 << (num);
+		ep->naking = 1;
+	} else
+		cnak_pending = cnak_pending & (~(1 << (num)));
+}
+
+
+/* Enables endpoint, is called by gadget driver */
+static int
+udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
+{
+	struct udc_ep		*ep;
+	struct udc		*dev;
+	u32			tmp;
+	unsigned long		iflags;
+	u8 udc_csr_epix;
+	unsigned		maxpacket;
+
+	if (!usbep
+			|| usbep->name == ep0_string
+			|| !desc
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+
+	ep = container_of(usbep, struct udc_ep, ep);
+	dev = ep->dev;
+
+	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);
+
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	spin_lock_irqsave(&dev->lock, iflags);
+	ep->desc = desc;
+
+	ep->halted = 0;
+
+	/* set traffic type */
+	tmp = readl(&dev->ep[ep->num].regs->ctl);
+	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
+	writel(tmp, &dev->ep[ep->num].regs->ctl);
+
+	/* set max packet size */
+	maxpacket = usb_endpoint_maxp(desc);
+	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
+	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
+	ep->ep.maxpacket = maxpacket;
+	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);
+
+	/* IN ep */
+	if (ep->in) {
+
+		/* ep ix in UDC CSR register space */
+		udc_csr_epix = ep->num;
+
+		/* set buffer size (tx fifo entries) */
+		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
+		/* double buffering: fifo size = 2 x max packet size */
+		tmp = AMD_ADDBITS(
+				tmp,
+				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
+					  / UDC_DWORD_BYTES,
+				UDC_EPIN_BUFF_SIZE);
+		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);
+
+		/* calc. tx fifo base addr */
+		udc_set_txfifo_addr(ep);
+
+		/* flush fifo */
+		tmp = readl(&ep->regs->ctl);
+		tmp |= AMD_BIT(UDC_EPCTL_F);
+		writel(tmp, &ep->regs->ctl);
+
+	/* OUT ep */
+	} else {
+		/* ep ix in UDC CSR register space */
+		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
+
+		/* set max packet size UDC CSR	*/
+		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
+		tmp = AMD_ADDBITS(tmp, maxpacket,
+					UDC_CSR_NE_MAX_PKT);
+		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
+
+		if (use_dma && !ep->in) {
+			/* alloc and init BNA dummy request */
+			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
+			ep->bna_occurred = 0;
+		}
+
+		if (ep->num != UDC_EP0OUT_IX)
+			dev->data_ep_enabled = 1;
+	}
+
+	/* set ep values */
+	tmp = readl(&dev->csr->ne[udc_csr_epix]);
+	/* max packet */
+	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
+	/* ep number */
+	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
+	/* ep direction */
+	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
+	/* ep type */
+	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
+	/* ep config */
+	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
+	/* ep interface */
+	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
+	/* ep alt */
+	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
+	/* write reg */
+	writel(tmp, &dev->csr->ne[udc_csr_epix]);
+
+	/* enable ep irq */
+	tmp = readl(&dev->regs->ep_irqmsk);
+	tmp &= AMD_UNMASK_BIT(ep->num);
+	writel(tmp, &dev->regs->ep_irqmsk);
+
+	/*
+	 * clear NAK by writing CNAK
+	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
+	 */
+	if (!use_dma || ep->in) {
+		tmp = readl(&ep->regs->ctl);
+		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+		writel(tmp, &ep->regs->ctl);
+		ep->naking = 0;
+		UDC_QUEUE_CNAK(ep, ep->num);
+	}
+	tmp = desc->bEndpointAddress;
+	DBG(dev, "%s enabled\n", usbep->name);
+
+	spin_unlock_irqrestore(&dev->lock, iflags);
+	return 0;
+}
+
+/* Resets endpoint */
+static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
+{
+	u32		tmp;
+
+	VDBG(ep->dev, "ep-%d reset\n", ep->num);
+	ep->desc = NULL;
+	ep->ep.desc = NULL;
+	ep->ep.ops = &udc_ep_ops;
+	INIT_LIST_HEAD(&ep->queue);
+
+	ep->ep.maxpacket = (u16) ~0;
+	/* set NAK */
+	tmp = readl(&ep->regs->ctl);
+	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
+	writel(tmp, &ep->regs->ctl);
+	ep->naking = 1;
+
+	/* disable interrupt */
+	tmp = readl(&regs->ep_irqmsk);
+	tmp |= AMD_BIT(ep->num);
+	writel(tmp, &regs->ep_irqmsk);
+
+	if (ep->in) {
+		/* unset P and IN bit of potential former DMA */
+		tmp = readl(&ep->regs->ctl);
+		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
+		writel(tmp, &ep->regs->ctl);
+
+		tmp = readl(&ep->regs->sts);
+		tmp |= AMD_BIT(UDC_EPSTS_IN);
+		writel(tmp, &ep->regs->sts);
+
+		/* flush the fifo */
+		tmp = readl(&ep->regs->ctl);
+		tmp |= AMD_BIT(UDC_EPCTL_F);
+		writel(tmp, &ep->regs->ctl);
+
+	}
+	/* reset desc pointer */
+	writel(0, &ep->regs->desptr);
+}
+
+/* Disables endpoint, is called by gadget driver */
+static int udc_ep_disable(struct usb_ep *usbep)
+{
+	struct udc_ep	*ep = NULL;
+	unsigned long	iflags;
+
+	if (!usbep)
+		return -EINVAL;
+
+	ep = container_of(usbep, struct udc_ep, ep);
+	if (usbep->name == ep0_string || !ep->desc)
+		return -EINVAL;
+
+	DBG(ep->dev, "Disable ep-%d\n", ep->num);
+
+	spin_lock_irqsave(&ep->dev->lock, iflags);
+	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
+	empty_req_queue(ep);
+	ep_init(ep->dev->regs, ep);
+	spin_unlock_irqrestore(&ep->dev->lock, iflags);
+
+	return 0;
+}
+
+/* Allocates request packet, called by gadget driver */
+static struct usb_request *
+udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
+{
+	struct udc_request	*req;
+	struct udc_data_dma	*dma_desc;
+	struct udc_ep	*ep;
+
+	if (!usbep)
+		return NULL;
+
+	ep = container_of(usbep, struct udc_ep, ep);
+
+	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
+	req = kzalloc(sizeof(struct udc_request), gfp);
+	if (!req)
+		return NULL;
+
+	req->req.dma = DMA_DONT_USE;
+	INIT_LIST_HEAD(&req->queue);
+
+	if (ep->dma) {
+		/* ep0 in requests are allocated from data pool here */
+		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
+						&req->td_phys);
+		if (!dma_desc) {
+			kfree(req);
+			return NULL;
+		}
+
+		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
+				"td_phys = %lx\n",
+				req, dma_desc,
+				(unsigned long)req->td_phys);
+		/* prevent from using desc. - set HOST BUSY */
+		dma_desc->status = AMD_ADDBITS(dma_desc->status,
+						UDC_DMA_STP_STS_BS_HOST_BUSY,
+						UDC_DMA_STP_STS_BS);
+		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
+		req->td_data = dma_desc;
+		req->td_data_last = NULL;
+		req->chain_len = 1;
+	}
+
+	return &req->req;
+}
+
+/* Frees request packet, called by gadget driver */
+static void
+udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
+{
+	struct udc_ep	*ep;
+	struct udc_request	*req;
+
+	if (!usbep || !usbreq)
+		return;
+
+	ep = container_of(usbep, struct udc_ep, ep);
+	req = container_of(usbreq, struct udc_request, req);
+	VDBG(ep->dev, "free_req req=%p\n", req);
+	BUG_ON(!list_empty(&req->queue));
+	if (req->td_data) {
+		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
+
+		/* free dma chain if created */
+		if (req->chain_len > 1)
+			udc_free_dma_chain(ep->dev, req);
+
+		pci_pool_free(ep->dev->data_requests, req->td_data,
+							req->td_phys);
+	}
+	kfree(req);
+}
+
+/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
+static void udc_init_bna_dummy(struct udc_request *req)
+{
+	if (req) {
+		/* set last bit */
+		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
+		/* set next pointer to itself */
+		req->td_data->next = req->td_phys;
+		/* mark buffer status as DMA done */
+		req->td_data->status
+			= AMD_ADDBITS(req->td_data->status,
+					UDC_DMA_STP_STS_BS_DMA_DONE,
+					UDC_DMA_STP_STS_BS);
+#ifdef UDC_VERBOSE
+		pr_debug("bna desc = %p, sts = %08x\n",
+			req->td_data, req->td_data->status);
+#endif
+	}
+}
+
+/* Allocate BNA dummy descriptor */
+static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
+{
+	struct udc_request *req = NULL;
+	struct usb_request *_req = NULL;
+
+	/* alloc the dummy request */
+	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
+	if (_req) {
+		req = container_of(_req, struct udc_request, req);
+		ep->bna_dummy_req = req;
+		udc_init_bna_dummy(req);
+	}
+	return req;
+}
+
+/* Write data to TX fifo for IN packets */
+static void
+udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
+{
+	u8			*req_buf;
+	u32			*buf;
+	int			i, j;
+	unsigned		bytes = 0;
+	unsigned		remaining = 0;
+
+	if (!req || !ep)
+		return;
+
+	req_buf = req->buf + req->actual;
+	prefetch(req_buf);
+	remaining = req->length - req->actual;
+
+	buf = (u32 *) req_buf;
+
+	bytes = ep->ep.maxpacket;
+	if (bytes > remaining)
+		bytes = remaining;
+
+	/* dwords first */
+	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
+		writel(*(buf + i), ep->txfifo);
+
+	/* remaining bytes must be written by byte access */
+	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
+		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
+							ep->txfifo);
+	}
+
+	/* dummy write confirm */
+	writel(0, &ep->regs->confirm);
+}
+
+/* Read dwords from RX fifo for OUT transfers */
+static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
+{
+	int i;
+
+	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);
+
+	for (i = 0; i < dwords; i++)
+		*(buf + i) = readl(dev->rxfifo);
+	return 0;
+}
+
+/* Read bytes from RX fifo for OUT transfers */
+static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
+{
+	int i, j;
+	u32 tmp;
+
+	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);
+
+	/* dwords first */
+	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
+		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
+
+	/* remaining bytes must be read by byte access */
+	if (bytes % UDC_DWORD_BYTES) {
+		tmp = readl(dev->rxfifo);
+		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
+			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
+			tmp = tmp >> UDC_BITS_PER_BYTE;
+		}
+	}
+
+	return 0;
+}
+
+/* Read data from RX fifo for OUT transfers */
+static int
+udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
+{
+	u8 *buf;
+	unsigned buf_space;
+	unsigned bytes = 0;
+	unsigned finished = 0;
+
+	/* number of bytes received */
+	bytes = readl(&ep->regs->sts);
+	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);
+
+	buf_space = req->req.length - req->req.actual;
+	buf = req->req.buf + req->req.actual;
+	if (bytes > buf_space) {
+		if ((buf_space % ep->ep.maxpacket) != 0) {
+			DBG(ep->dev,
+				"%s: rx %d bytes, rx-buf space = %d bytesn\n",
+				ep->ep.name, bytes, buf_space);
+			req->req.status = -EOVERFLOW;
+		}
+		bytes = buf_space;
+	}
+	req->req.actual += bytes;
+
+	/* last packet ? */
+	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
+		|| ((req->req.actual == req->req.length) && !req->req.zero))
+		finished = 1;
+
+	/* read rx fifo bytes */
+	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
+	udc_rxfifo_read_bytes(ep->dev, buf, bytes);
+
+	return finished;
+}
+
+/* create/re-init a DMA descriptor or a DMA descriptor chain */
+static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
+{
+	int	retval = 0;
+	u32	tmp;
+
+	VDBG(ep->dev, "prep_dma\n");
+	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
+			ep->num, req->td_data);
+
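+	/*
+	 * buffer status (BS) ownership: HOST_READY hands the descriptor to
+	 * the DMA engine, HOST_BUSY keeps it under software control until
+	 * the transfer is actually started (see udc_queue())
+	 */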
+	/* set buffer pointer */
+	req->td_data->bufptr = req->req.dma;
+
+	/* set last bit */
+	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
+
+	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
+	if (use_dma_ppb) {
+
+		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
+		if (retval != 0) {
+			if (retval == -ENOMEM)
+				DBG(ep->dev, "Out of DMA memory\n");
+			return retval;
+		}
+		if (ep->in) {
+			if (req->req.length == ep->ep.maxpacket) {
+				/* write tx bytes */
+				req->td_data->status =
+					AMD_ADDBITS(req->td_data->status,
+						ep->ep.maxpacket,
+						UDC_DMA_IN_STS_TXBYTES);
+
+			}
+		}
+
+	}
+
+	if (ep->in) {
+		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
+				"maxpacket=%d ep%d\n",
+				use_dma_ppb, req->req.length,
+				ep->ep.maxpacket, ep->num);
+		/*
+		 * if bytes < max packet then tx bytes must
+		 * be written in packet per buffer mode
+		 */
+		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
+				|| ep->num == UDC_EP0OUT_IX
+				|| ep->num == UDC_EP0IN_IX) {
+			/* write tx bytes */
+			req->td_data->status =
+				AMD_ADDBITS(req->td_data->status,
+						req->req.length,
+						UDC_DMA_IN_STS_TXBYTES);
+			/* reset frame num */
+			req->td_data->status =
+				AMD_ADDBITS(req->td_data->status,
+						0,
+						UDC_DMA_IN_STS_FRAMENUM);
+		}
+		/* set HOST BUSY */
+		req->td_data->status =
+			AMD_ADDBITS(req->td_data->status,
+				UDC_DMA_STP_STS_BS_HOST_BUSY,
+				UDC_DMA_STP_STS_BS);
+	} else {
+		VDBG(ep->dev, "OUT set host ready\n");
+		/* set HOST READY */
+		req->td_data->status =
+			AMD_ADDBITS(req->td_data->status,
+				UDC_DMA_STP_STS_BS_HOST_READY,
+				UDC_DMA_STP_STS_BS);
+
+		/* clear NAK by writing CNAK */
+		if (ep->naking) {
+			tmp = readl(&ep->regs->ctl);
+			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+			writel(tmp, &ep->regs->ctl);
+			ep->naking = 0;
+			UDC_QUEUE_CNAK(ep, ep->num);
+		}
+	}
+
+	return retval;
+}
+
+/* Completes request packet ... caller MUST hold lock */
+static void
+complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
+__releases(ep->dev->lock)
+__acquires(ep->dev->lock)
+{
+	struct udc		*dev;
+	unsigned		halted;
+
+	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);
+
+	dev = ep->dev;
+	/* unmap DMA */
+	if (ep->dma)
+		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
+
+	halted = ep->halted;
+	ep->halted = 1;
+
+	/* set new status if pending */
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = sts;
+
+	/* remove from ep queue */
+	list_del_init(&req->queue);
+
+	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
+		&req->req, req->req.length, ep->ep.name, sts);
+
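+	/*
+	 * drop the lock around the completion callback so the gadget driver
+	 * may queue new requests from within it
+	 */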
+	spin_unlock(&dev->lock);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&dev->lock);
+	ep->halted = halted;
+}
+
+/* frees pci pool descriptors of a DMA chain */
+static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
+{
+
+	int ret_val = 0;
+	struct udc_data_dma	*td;
+	struct udc_data_dma	*td_last = NULL;
+	unsigned int i;
+
+	DBG(dev, "free chain req = %p\n", req);
+
+	/* do not free first desc., will be done by free for request */
+	td_last = req->td_data;
+	td = phys_to_virt(td_last->next);
+
+	for (i = 1; i < req->chain_len; i++) {
+
+		pci_pool_free(dev->data_requests, td,
+				(dma_addr_t) td_last->next);
+		td_last = td;
+		td = phys_to_virt(td_last->next);
+	}
+
+	return ret_val;
+}
+
+/* Iterates to the end of a DMA chain and returns last descriptor */
+static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
+{
+	struct udc_data_dma	*td;
+
+	td = req->td_data;
+	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
+		td = phys_to_virt(td->next);
+
+	return td;
+
+}
+
+/* Iterates to the end of a DMA chain and counts bytes received */
+static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
+{
+	struct udc_data_dma	*td;
+	u32 count;
+
+	td = req->td_data;
+	/* number of bytes received */
+	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
+
+	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
+		td = phys_to_virt(td->next);
+		/* number of bytes received */
+		if (td) {
+			count += AMD_GETBITS(td->status,
+				UDC_DMA_OUT_STS_RXBYTES);
+		}
+	}
+
+	return count;
+
+}
+
+/* Creates or re-inits a DMA chain */
+static int udc_create_dma_chain(
+	struct udc_ep *ep,
+	struct udc_request *req,
+	unsigned long buf_len, gfp_t gfp_flags
+)
+{
+	unsigned long bytes = req->req.length;
+	unsigned int i;
+	dma_addr_t dma_addr;
+	struct udc_data_dma	*td = NULL;
+	struct udc_data_dma	*last = NULL;
+	unsigned long txbytes;
+	unsigned create_new_chain = 0;
+	unsigned len;
+
+	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
+			bytes, buf_len);
+	dma_addr = DMA_DONT_USE;
+
+	/* unset L bit in first desc for OUT */
+	if (!ep->in)
+		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
+
+	/* alloc only new desc's if not already available */
+	len = req->req.length / ep->ep.maxpacket;
+	if (req->req.length % ep->ep.maxpacket)
+		len++;
+
+	if (len > req->chain_len) {
+		/* shorter chain already allocated before */
+		if (req->chain_len > 1)
+			udc_free_dma_chain(ep->dev, req);
+		req->chain_len = len;
+		create_new_chain = 1;
+	}
+
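+	/*
+	 * the first buf_len sized chunk is covered by req->td_data (set up
+	 * by the caller); every further chunk of the request buffer gets its
+	 * own descriptor in the loop below
+	 */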
+	td = req->td_data;
+	/* gen. required number of descriptors and buffers */
+	for (i = buf_len; i < bytes; i += buf_len) {
+		/* create or determine next desc. */
+		if (create_new_chain) {
+
+			td = pci_pool_alloc(ep->dev->data_requests,
+					gfp_flags, &dma_addr);
+			if (!td)
+				return -ENOMEM;
+
+			td->status = 0;
+		} else if (i == buf_len) {
+			/* first td */
+			td = (struct udc_data_dma *) phys_to_virt(
+						req->td_data->next);
+			td->status = 0;
+		} else {
+			td = (struct udc_data_dma *) phys_to_virt(last->next);
+			td->status = 0;
+		}
+
+
+		if (td)
+			td->bufptr = req->req.dma + i; /* assign buffer */
+		else
+			break;
+
+		/* short packet ? */
+		if ((bytes - i) >= buf_len) {
+			txbytes = buf_len;
+		} else {
+			/* short packet */
+			txbytes = bytes - i;
+		}
+
+		/* link td and assign tx bytes */
+		if (i == buf_len) {
+			if (create_new_chain)
+				req->td_data->next = dma_addr;
+			/*
+			else
+				req->td_data->next = virt_to_phys(td);
+			*/
+			/* write tx bytes */
+			if (ep->in) {
+				/* first desc */
+				req->td_data->status =
+					AMD_ADDBITS(req->td_data->status,
+							ep->ep.maxpacket,
+							UDC_DMA_IN_STS_TXBYTES);
+				/* second desc */
+				td->status = AMD_ADDBITS(td->status,
+							txbytes,
+							UDC_DMA_IN_STS_TXBYTES);
+			}
+		} else {
+			if (create_new_chain)
+				last->next = dma_addr;
+			/*
+			else
+				last->next = virt_to_phys(td);
+			*/
+			if (ep->in) {
+				/* write tx bytes */
+				td->status = AMD_ADDBITS(td->status,
+							txbytes,
+							UDC_DMA_IN_STS_TXBYTES);
+			}
+		}
+		last = td;
+	}
+	/* set last bit */
+	if (td) {
+		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
+		/* remember the last desc. of the chain */
+		req->td_data_last = td;
+	}
+
+	return 0;
+}
+
+/* Enabling RX DMA */
+static void udc_set_rde(struct udc *dev)
+{
+	u32 tmp;
+
+	VDBG(dev, "udc_set_rde()\n");
+	/* stop RDE timer */
+	if (timer_pending(&udc_timer)) {
+		set_rde = 0;
+		mod_timer(&udc_timer, jiffies - 1);
+	}
+	/* set RDE */
+	tmp = readl(&dev->regs->ctl);
+	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
+	writel(tmp, &dev->regs->ctl);
+}
+
+/* Queues a request packet, called by gadget driver */
+static int
+udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
+{
+	int			retval = 0;
+	u8			open_rxfifo = 0;
+	unsigned long		iflags;
+	struct udc_ep		*ep;
+	struct udc_request	*req;
+	struct udc		*dev;
+	u32			tmp;
+
+	/* check the inputs */
+	req = container_of(usbreq, struct udc_request, req);
+
+	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
+			|| !list_empty(&req->queue))
+		return -EINVAL;
+
+	ep = container_of(usbep, struct udc_ep, ep);
+	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
+		return -EINVAL;
+
+	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
+	dev = ep->dev;
+
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	/* map dma (usually done before) */
+	if (ep->dma) {
+		VDBG(dev, "DMA map req %p\n", req);
+		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
+		if (retval)
+			return retval;
+	}
+
+	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
+			usbep->name, usbreq, usbreq->length,
+			req->td_data, usbreq->buf);
+
+	spin_lock_irqsave(&dev->lock, iflags);
+	usbreq->actual = 0;
+	usbreq->status = -EINPROGRESS;
+	req->dma_done = 0;
+
+	/* on empty queue just do first transfer */
+	if (list_empty(&ep->queue)) {
+		/* zlp */
+		if (usbreq->length == 0) {
+			/* IN zlp's are handled by hardware */
+			complete_req(ep, req, 0);
+			VDBG(dev, "%s: zlp\n", ep->ep.name);
+			/*
+			 * if set_config or set_intf is waiting for ack by zlp
+			 * then set CSR_DONE
+			 */
+			if (dev->set_cfg_not_acked) {
+				tmp = readl(&dev->regs->ctl);
+				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
+				writel(tmp, &dev->regs->ctl);
+				dev->set_cfg_not_acked = 0;
+			}
+			/* setup command is ACK'ed now by zlp */
+			if (dev->waiting_zlp_ack_ep0in) {
+				/* clear NAK by writing CNAK in EP0_IN */
+				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+				dev->ep[UDC_EP0IN_IX].naking = 0;
+				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
+							UDC_EP0IN_IX);
+				dev->waiting_zlp_ack_ep0in = 0;
+			}
+			goto finished;
+		}
+		if (ep->dma) {
+			retval = prep_dma(ep, req, gfp);
+			if (retval != 0)
+				goto finished;
+			/* write desc pointer to enable DMA */
+			if (ep->in) {
+				/* set HOST READY */
+				req->td_data->status =
+					AMD_ADDBITS(req->td_data->status,
+						UDC_DMA_IN_STS_BS_HOST_READY,
+						UDC_DMA_IN_STS_BS);
+			}
+
+			/* disable RX DMA while the descriptor is updated */
+			if (!ep->in) {
+				/* stop RDE timer */
+				if (timer_pending(&udc_timer)) {
+					set_rde = 0;
+					mod_timer(&udc_timer, jiffies - 1);
+				}
+				/* clear RDE */
+				tmp = readl(&dev->regs->ctl);
+				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
+				writel(tmp, &dev->regs->ctl);
+				open_rxfifo = 1;
+
+				/*
+				 * if BNA occurred then let BNA dummy desc.
+				 * point to current desc.
+				 */
+				if (ep->bna_occurred) {
+					VDBG(dev, "copy to BNA dummy desc.\n");
+					memcpy(ep->bna_dummy_req->td_data,
+						req->td_data,
+						sizeof(struct udc_data_dma));
+				}
+			}
+			/* write desc pointer */
+			writel(req->td_phys, &ep->regs->desptr);
+
+			/* clear NAK by writing CNAK */
+			if (ep->naking) {
+				tmp = readl(&ep->regs->ctl);
+				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+				writel(tmp, &ep->regs->ctl);
+				ep->naking = 0;
+				UDC_QUEUE_CNAK(ep, ep->num);
+			}
+
+			if (ep->in) {
+				/* enable ep irq */
+				tmp = readl(&dev->regs->ep_irqmsk);
+				tmp &= AMD_UNMASK_BIT(ep->num);
+				writel(tmp, &dev->regs->ep_irqmsk);
+			}
+		} else if (ep->in) {
+			/* enable ep irq */
+			tmp = readl(&dev->regs->ep_irqmsk);
+			tmp &= AMD_UNMASK_BIT(ep->num);
+			writel(tmp, &dev->regs->ep_irqmsk);
+		}
+
+	} else if (ep->dma) {
+
+		/*
+		 * prep_dma is not used for OUT ep's here; this is not possible
+		 * in PPB modes because of how the DMA chain is created
+		 */
+		if (ep->in) {
+			retval = prep_dma(ep, req, gfp);
+			if (retval != 0)
+				goto finished;
+		}
+	}
+	VDBG(dev, "list_add\n");
+	/* add request to ep queue */
+	if (req) {
+
+		list_add_tail(&req->queue, &ep->queue);
+
+		/* open rxfifo if out data queued */
+		if (open_rxfifo) {
+			/* enable DMA */
+			req->dma_going = 1;
+			udc_set_rde(dev);
+			if (ep->num != UDC_EP0OUT_IX)
+				dev->data_ep_queued = 1;
+		}
+		/* stop OUT naking */
+		if (!ep->in) {
+			if (!use_dma && udc_rxfifo_pending) {
+				DBG(dev, "udc_queue(): pending bytes in "
+					"rxfifo after nyet\n");
+				/*
+				 * read bytes still pending after NYET,
+				 * analogous to the handling in the ISR
+				 */
+				if (udc_rxfifo_read(ep, req)) {
+					/* finish */
+					complete_req(ep, req, 0);
+				}
+				udc_rxfifo_pending = 0;
+
+			}
+		}
+	}
+
+finished:
+	spin_unlock_irqrestore(&dev->lock, iflags);
+	return retval;
+}
+
+/* Empty request queue of an endpoint; caller holds spinlock */
+static void empty_req_queue(struct udc_ep *ep)
+{
+	struct udc_request	*req;
+
+	ep->halted = 1;
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next,
+			struct udc_request,
+			queue);
+		complete_req(ep, req, -ESHUTDOWN);
+	}
+}
+
+/* Dequeues a request packet, called by gadget driver */
+static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
+{
+	struct udc_ep		*ep;
+	struct udc_request	*req;
+	unsigned		halted;
+	unsigned long		iflags;
+
+	ep = container_of(usbep, struct udc_ep, ep);
+	if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
+				&& ep->num != UDC_EP0OUT_IX)))
+		return -EINVAL;
+
+	req = container_of(usbreq, struct udc_request, req);
+
+	spin_lock_irqsave(&ep->dev->lock, iflags);
+	halted = ep->halted;
+	ep->halted = 1;
+	/* request in processing or next one */
+	if (ep->queue.next == &req->queue) {
+		if (ep->dma && req->dma_going) {
+			if (ep->in)
+				ep->cancel_transfer = 1;
+			else {
+				u32 tmp;
+				u32 dma_sts;
+				/* stop potential receive DMA */
+				tmp = readl(&udc->regs->ctl);
+				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
+							&udc->regs->ctl);
+				/*
+				 * Cancel transfer later in ISR
+				 * if descriptor was touched.
+				 */
+				dma_sts = AMD_GETBITS(req->td_data->status,
+							UDC_DMA_OUT_STS_BS);
+				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
+					ep->cancel_transfer = 1;
+				else {
+					udc_init_bna_dummy(ep->req);
+					writel(ep->bna_dummy_req->td_phys,
+						&ep->regs->desptr);
+				}
+				writel(tmp, &udc->regs->ctl);
+			}
+		}
+	}
+	complete_req(ep, req, -ECONNRESET);
+	ep->halted = halted;
+
+	spin_unlock_irqrestore(&ep->dev->lock, iflags);
+	return 0;
+}
+
+/* Halt or clear halt of endpoint */
+static int
+udc_set_halt(struct usb_ep *usbep, int halt)
+{
+	struct udc_ep	*ep;
+	u32 tmp;
+	unsigned long iflags;
+	int retval = 0;
+
+	if (!usbep)
+		return -EINVAL;
+
+	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);
+
+	ep = container_of(usbep, struct udc_ep, ep);
+	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
+		return -EINVAL;
+	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	spin_lock_irqsave(&udc_stall_spinlock, iflags);
+	/* halt or clear halt */
+	if (halt) {
+		if (ep->num == 0)
+			ep->dev->stall_ep0in = 1;
+		else {
+			/*
+			 * set STALL
+			 * rxfifo empty not taken into account
+			 */
+			tmp = readl(&ep->regs->ctl);
+			tmp |= AMD_BIT(UDC_EPCTL_S);
+			writel(tmp, &ep->regs->ctl);
+			ep->halted = 1;
+
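+			/*
+			 * udc_pollstall_timer periodically checks the STALL
+			 * bit via udc_handle_halt_state() and clears the
+			 * software halt state once the UDC has cleared it
+			 */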
+			/* setup poll timer */
+			if (!timer_pending(&udc_pollstall_timer)) {
+				udc_pollstall_timer.expires = jiffies +
+					HZ * UDC_POLLSTALL_TIMER_USECONDS
+					/ (1000 * 1000);
+				if (!stop_pollstall_timer) {
+					DBG(ep->dev, "start polltimer\n");
+					add_timer(&udc_pollstall_timer);
+				}
+			}
+		}
+	} else {
+		/* ep is halted by set_halt() before */
+		if (ep->halted) {
+			tmp = readl(&ep->regs->ctl);
+			/* clear stall bit */
+			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
+			/* clear NAK by writing CNAK */
+			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+			writel(tmp, &ep->regs->ctl);
+			ep->halted = 0;
+			UDC_QUEUE_CNAK(ep, ep->num);
+		}
+	}
+	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
+	return retval;
+}
+
+/* gadget interface */
+static const struct usb_ep_ops udc_ep_ops = {
+	.enable		= udc_ep_enable,
+	.disable	= udc_ep_disable,
+
+	.alloc_request	= udc_alloc_request,
+	.free_request	= udc_free_request,
+
+	.queue		= udc_queue,
+	.dequeue	= udc_dequeue,
+
+	.set_halt	= udc_set_halt,
+	/* fifo ops not implemented */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* Get frame counter (not implemented) */
+static int udc_get_frame(struct usb_gadget *gadget)
+{
+	return -EOPNOTSUPP;
+}
+
+/* Remote wakeup gadget interface */
+static int udc_wakeup(struct usb_gadget *gadget)
+{
+	struct udc		*dev;
+
+	if (!gadget)
+		return -EINVAL;
+	dev = container_of(gadget, struct udc, gadget);
+	udc_remote_wakeup(dev);
+
+	return 0;
+}
+
+static int amd5536_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *));
+static int amd5536_stop(struct usb_gadget_driver *driver);
+/* gadget operations */
+static const struct usb_gadget_ops udc_ops = {
+	.wakeup		= udc_wakeup,
+	.get_frame	= udc_get_frame,
+	.start		= amd5536_start,
+	.stop		= amd5536_stop,
+};
+
+/* Setups endpoint parameters, adds endpoints to linked list */
+static void make_ep_lists(struct udc *dev)
+{
+	/* make gadget ep lists */
+	INIT_LIST_HEAD(&dev->gadget.ep_list);
+	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
+						&dev->gadget.ep_list);
+	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
+						&dev->gadget.ep_list);
+	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
+						&dev->gadget.ep_list);
+
+	/* fifo config */
+	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
+	if (dev->gadget.speed == USB_SPEED_FULL)
+		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
+	else if (dev->gadget.speed == USB_SPEED_HIGH)
+		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
+	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
+}
+
+/* init registers at driver load time */
+static int startup_registers(struct udc *dev)
+{
+	u32 tmp;
+
+	/* init controller by soft reset */
+	udc_soft_reset(dev);
+
+	/* mask interrupts that are not needed */
+	udc_mask_unused_interrupts(dev);
+
+	/* put into initial config */
+	udc_basic_init(dev);
+	/* link up all endpoints */
+	udc_setup_endpoints(dev);
+
+	/* program speed */
+	tmp = readl(&dev->regs->cfg);
+	if (use_fullspeed)
+		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
+	else
+		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
+	writel(tmp, &dev->regs->cfg);
+
+	return 0;
+}
+
+/* Inits UDC context */
+static void udc_basic_init(struct udc *dev)
+{
+	u32	tmp;
+
+	DBG(dev, "udc_basic_init()\n");
+
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+	/* stop RDE timer */
+	if (timer_pending(&udc_timer)) {
+		set_rde = 0;
+		mod_timer(&udc_timer, jiffies - 1);
+	}
+	/* stop poll stall timer */
+	if (timer_pending(&udc_pollstall_timer))
+		mod_timer(&udc_pollstall_timer, jiffies - 1);
+	/* disable DMA */
+	tmp = readl(&dev->regs->ctl);
+	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
+	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
+	writel(tmp, &dev->regs->ctl);
+
+	/* enable dynamic CSR programming */
+	tmp = readl(&dev->regs->cfg);
+	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
+	/* set self powered */
+	tmp |= AMD_BIT(UDC_DEVCFG_SP);
+	/* set remote wakeup capable */
+	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
+	writel(tmp, &dev->regs->cfg);
+
+	make_ep_lists(dev);
+
+	dev->data_ep_enabled = 0;
+	dev->data_ep_queued = 0;
+}
+
+/* Sets initial endpoint parameters */
+static void udc_setup_endpoints(struct udc *dev)
+{
+	struct udc_ep	*ep;
+	u32	tmp;
+	u32	reg;
+
+	DBG(dev, "udc_setup_endpoints()\n");
+
+	/* read enum speed */
+	tmp = readl(&dev->regs->sts);
+	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
+	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
+		dev->gadget.speed = USB_SPEED_HIGH;
+	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
+		dev->gadget.speed = USB_SPEED_FULL;
+
+	/* set basic ep parameters */
+	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
+		ep = &dev->ep[tmp];
+		ep->dev = dev;
+		ep->ep.name = ep_string[tmp];
+		ep->num = tmp;
+		/* txfifo size is calculated at enable time */
+		ep->txfifo = dev->txfifo;
+
+		/* fifo size */
+		if (tmp < UDC_EPIN_NUM) {
+			ep->fifo_depth = UDC_TXFIFO_SIZE;
+			ep->in = 1;
+		} else {
+			ep->fifo_depth = UDC_RXFIFO_SIZE;
+			ep->in = 0;
+
+		}
+		ep->regs = &dev->ep_regs[tmp];
+		/*
+		 * reset the ep only if it was not enabled before, to avoid
+		 * disabling its interrupts when an ENUM interrupt occurs
+		 * while the gadget driver has not enabled the ep yet
+		 */
+		if (!ep->desc)
+			ep_init(dev->regs, ep);
+
+		if (use_dma) {
+			/*
+			 * ep->dma is not really used, it only indicates that
+			 * DMA is active (it points at the dev control regs);
+			 * candidate for removal
+			 */
+			ep->dma = &dev->regs->ctl;
+
+			/* nak OUT endpoints until enable - not for ep0 */
+			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
+						&& tmp > UDC_EPIN_NUM) {
+				/* set NAK */
+				reg = readl(&dev->ep[tmp].regs->ctl);
+				reg |= AMD_BIT(UDC_EPCTL_SNAK);
+				writel(reg, &dev->ep[tmp].regs->ctl);
+				dev->ep[tmp].naking = 1;
+
+			}
+		}
+	}
+	/* EP0 max packet */
+	if (dev->gadget.speed == USB_SPEED_FULL) {
+		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
+		dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
+						UDC_FS_EP0OUT_MAX_PKT_SIZE;
+	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
+		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
+		dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
+	}
+
+	/*
+	 * with suspend bug workaround, ep0 params for gadget driver
+	 * are set at gadget driver bind() call
+	 */
+	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
+	dev->ep[UDC_EP0IN_IX].halted = 0;
+	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+
+	/* init cfg/alt/int */
+	dev->cur_config = 0;
+	dev->cur_intf = 0;
+	dev->cur_alt = 0;
+}
+
+/* Bringup after Connect event, initial bringup to be ready for ep0 events */
+static void usb_connect(struct udc *dev)
+{
+
+	dev_info(&dev->pdev->dev, "USB Connect\n");
+
+	dev->connected = 1;
+
+	/* put into initial config */
+	udc_basic_init(dev);
+
+	/* enable device setup interrupts */
+	udc_enable_dev_setup_interrupts(dev);
+}
+
+/*
+ * Calls gadget with disconnect event and resets the UDC and makes
+ * initial bringup to be ready for ep0 events
+ */
+static void usb_disconnect(struct udc *dev)
+{
+
+	dev_info(&dev->pdev->dev, "USB Disconnect\n");
+
+	dev->connected = 0;
+
+	/* mask interrupts */
+	udc_mask_unused_interrupts(dev);
+
+	/* REVISIT there doesn't seem to be a point to having this
+	 * talk to a tasklet ... do it directly, we already hold
+	 * the spinlock needed to process the disconnect.
+	 */
+
+	tasklet_schedule(&disconnect_tasklet);
+}
+
+/* Tasklet for disconnect to be outside of interrupt context */
+static void udc_tasklet_disconnect(unsigned long par)
+{
+	struct udc *dev = (struct udc *)(*((struct udc **) par));
+	u32 tmp;
+
+	DBG(dev, "Tasklet disconnect\n");
+	spin_lock_irq(&dev->lock);
+
+	if (dev->driver) {
+		spin_unlock(&dev->lock);
+		dev->driver->disconnect(&dev->gadget);
+		spin_lock(&dev->lock);
+
+		/* empty queues */
+		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
+			empty_req_queue(&dev->ep[tmp]);
+
+	}
+
+	/* disable ep0 */
+	ep_init(dev->regs,
+			&dev->ep[UDC_EP0IN_IX]);
+
+
+	if (!soft_reset_occured) {
+		/* init controller by soft reset */
+		udc_soft_reset(dev);
+		soft_reset_occured++;
+	}
+
+	/* re-enable dev interrupts */
+	udc_enable_dev_setup_interrupts(dev);
+	/* back to full speed ? */
+	if (use_fullspeed) {
+		tmp = readl(&dev->regs->cfg);
+		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
+		writel(tmp, &dev->regs->cfg);
+	}
+
+	spin_unlock_irq(&dev->lock);
+}
+
+/* Reset the UDC core */
+static void udc_soft_reset(struct udc *dev)
+{
+	unsigned long	flags;
+
+	DBG(dev, "Soft reset\n");
+	/*
+	 * clear any pending interrupts, because their status
+	 * is lost after soft reset;
+	 * first reset the ep interrupt status
+	 */
+	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
+	/* device int. status reset */
+	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
+
+	spin_lock_irqsave(&udc_irq_spinlock, flags);
+	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
+	readl(&dev->regs->cfg);
+	spin_unlock_irqrestore(&udc_irq_spinlock, flags);
+
+}
+
+/* RDE timer callback to set RDE bit */
+static void udc_timer_function(unsigned long v)
+{
+	u32 tmp;
+
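+	/*
+	 * set_rde states: <= 0 means RDE handling is done elsewhere (already
+	 * enabled or handled by udc_queue()), 1 means keep polling the rxfifo,
+	 * > 1 means the fifo contained data on the previous poll, so RDE is
+	 * enabled now
+	 */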
+	spin_lock_irq(&udc_irq_spinlock);
+
+	if (set_rde > 0) {
+		/*
+		 * conditionally open the fifo if it was filled
+		 * on the last timer call
+		 */
+		if (set_rde > 1) {
+			/* set RDE to receive setup data */
+			tmp = readl(&udc->regs->ctl);
+			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
+			writel(tmp, &udc->regs->ctl);
+			set_rde = -1;
+		} else if (readl(&udc->regs->sts)
+				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
+			/*
+			 * if fifo empty setup polling, do not just
+			 * open the fifo
+			 */
+			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
+			if (!stop_timer)
+				add_timer(&udc_timer);
+		} else {
+			/*
+			 * the fifo contains data now; set up the timer to open
+			 * the fifo when it expires, so that setup packets can
+			 * still be received.  When data packets get queued by
+			 * the gadget layer, the timer is forced to expire with
+			 * set_rde=0 (RDE is set in udc_queue())
+			 */
+			set_rde++;
+			/* debug: lhadmot_timer_start = 221070 */
+			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
+			if (!stop_timer)
+				add_timer(&udc_timer);
+		}
+
+	} else
+		set_rde = -1; /* RDE was set by udc_queue() */
+	spin_unlock_irq(&udc_irq_spinlock);
+	if (stop_timer)
+		complete(&on_exit);
+
+}
+
+/* Handle halt state, used in stall poll timer */
+static void udc_handle_halt_state(struct udc_ep *ep)
+{
+	u32 tmp;
+	/* while the ep is halted, check whether the STALL bit was cleared */
+	if (ep->halted == 1) {
+		tmp = readl(&ep->regs->ctl);
+		/* STALL cleared ? */
+		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
+			/*
+			 * FIXME: the MSC spec requires that the stall remains
+			 * even after receiving CLEAR_FEATURE HALT. So
+			 * we would set STALL again here to be compliant.
+			 * But with current mass storage drivers this does
+			 * not work (would produce endless host retries).
+			 * So we clear halt on CLEAR_FEATURE.
+			 *
+			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
+			tmp |= AMD_BIT(UDC_EPCTL_S);
+			writel(tmp, &ep->regs->ctl);*/
+
+			/* clear NAK by writing CNAK */
+			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+			writel(tmp, &ep->regs->ctl);
+			ep->halted = 0;
+			UDC_QUEUE_CNAK(ep, ep->num);
+		}
+	}
+}
+
+/* Stall timer callback to poll S bit and set it again after */
+static void udc_pollstall_timer_function(unsigned long v)
+{
+	struct udc_ep *ep;
+	int halted = 0;
+
+	spin_lock_irq(&udc_stall_spinlock);
+	/*
+	 * only one IN and one OUT endpoint are handled;
+	 * IN poll stall first
+	 */
+	ep = &udc->ep[UDC_EPIN_IX];
+	udc_handle_halt_state(ep);
+	if (ep->halted)
+		halted = 1;
+	/* OUT poll stall */
+	ep = &udc->ep[UDC_EPOUT_IX];
+	udc_handle_halt_state(ep);
+	if (ep->halted)
+		halted = 1;
+
+	/* setup timer again when still halted */
+	if (!stop_pollstall_timer && halted) {
+		udc_pollstall_timer.expires = jiffies +
+					HZ * UDC_POLLSTALL_TIMER_USECONDS
+					/ (1000 * 1000);
+		add_timer(&udc_pollstall_timer);
+	}
+	spin_unlock_irq(&udc_stall_spinlock);
+
+	if (stop_pollstall_timer)
+		complete(&on_pollstall_exit);
+}
+
+/* Inits endpoint 0 so that SETUP packets are processed */
+static void activate_control_endpoints(struct udc *dev)
+{
+	u32 tmp;
+
+	DBG(dev, "activate_control_endpoints\n");
+
+	/* flush fifo */
+	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+	tmp |= AMD_BIT(UDC_EPCTL_F);
+	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+
+	/* set ep0 directions */
+	dev->ep[UDC_EP0IN_IX].in = 1;
+	dev->ep[UDC_EP0OUT_IX].in = 0;
+
+	/* set buffer size (tx fifo entries) of EP0_IN */
+	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
+	if (dev->gadget.speed == USB_SPEED_FULL)
+		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
+					UDC_EPIN_BUFF_SIZE);
+	else if (dev->gadget.speed == USB_SPEED_HIGH)
+		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
+					UDC_EPIN_BUFF_SIZE);
+	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
+
+	/* set max packet size of EP0_IN */
+	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
+	if (dev->gadget.speed == USB_SPEED_FULL)
+		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
+					UDC_EP_MAX_PKT_SIZE);
+	else if (dev->gadget.speed == USB_SPEED_HIGH)
+		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
+				UDC_EP_MAX_PKT_SIZE);
+	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
+
+	/* set max packet size of EP0_OUT */
+	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
+	if (dev->gadget.speed == USB_SPEED_FULL)
+		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
+					UDC_EP_MAX_PKT_SIZE);
+	else if (dev->gadget.speed == USB_SPEED_HIGH)
+		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
+					UDC_EP_MAX_PKT_SIZE);
+	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
+
+	/* set max packet size of EP0 in UDC CSR */
+	tmp = readl(&dev->csr->ne[0]);
+	if (dev->gadget.speed == USB_SPEED_FULL)
+		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
+					UDC_CSR_NE_MAX_PKT);
+	else if (dev->gadget.speed == USB_SPEED_HIGH)
+		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
+					UDC_CSR_NE_MAX_PKT);
+	writel(tmp, &dev->csr->ne[0]);
+
+	if (use_dma) {
+		dev->ep[UDC_EP0OUT_IX].td->status |=
+			AMD_BIT(UDC_DMA_OUT_STS_L);
+		/* write dma desc address */
+		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
+			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
+		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
+			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
+		/* stop RDE timer */
+		if (timer_pending(&udc_timer)) {
+			set_rde = 0;
+			mod_timer(&udc_timer, jiffies - 1);
+		}
+		/* stop pollstall timer */
+		if (timer_pending(&udc_pollstall_timer))
+			mod_timer(&udc_pollstall_timer, jiffies - 1);
+		/* enable DMA */
+		tmp = readl(&dev->regs->ctl);
+		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
+				| AMD_BIT(UDC_DEVCTL_RDE)
+				| AMD_BIT(UDC_DEVCTL_TDE);
+		if (use_dma_bufferfill_mode)
+			tmp |= AMD_BIT(UDC_DEVCTL_BF);
+		else if (use_dma_ppb_du)
+			tmp |= AMD_BIT(UDC_DEVCTL_DU);
+		writel(tmp, &dev->regs->ctl);
+	}
+
+	/* clear NAK by writing CNAK for EP0IN */
+	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+	dev->ep[UDC_EP0IN_IX].naking = 0;
+	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
+
+	/* clear NAK by writing CNAK for EP0OUT */
+	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
+	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
+	dev->ep[UDC_EP0OUT_IX].naking = 0;
+	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
+}
+
+/* Make endpoint 0 ready for control traffic */
+static int setup_ep0(struct udc *dev)
+{
+	activate_control_endpoints(dev);
+	/* enable ep0 interrupts */
+	udc_enable_ep0_interrupts(dev);
+	/* enable device setup interrupts */
+	udc_enable_dev_setup_interrupts(dev);
+
+	return 0;
+}
+
+/* Called by gadget driver to register itself */
+static int amd5536_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct udc		*dev = udc;
+	int			retval;
+	u32 tmp;
+
+	if (!driver || !bind || !driver->setup
+			|| driver->max_speed < USB_SPEED_HIGH)
+		return -EINVAL;
+	if (!dev)
+		return -ENODEV;
+	if (dev->driver)
+		return -EBUSY;
+
+	driver->driver.bus = NULL;
+	dev->driver = driver;
+	dev->gadget.dev.driver = &driver->driver;
+
+	retval = bind(&dev->gadget);
+
+	/* Some gadget drivers use both ep0 directions.
+	 * NOTE: to gadget driver, ep0 is just one endpoint...
+	 */
+	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
+		dev->ep[UDC_EP0IN_IX].ep.driver_data;
+
+	if (retval) {
+		DBG(dev, "binding to %s returning %d\n",
+				driver->driver.name, retval);
+		dev->driver = NULL;
+		dev->gadget.dev.driver = NULL;
+		return retval;
+	}
+
+	/* get ready for ep0 traffic */
+	setup_ep0(dev);
+
+	/* clear SD */
+	tmp = readl(&dev->regs->ctl);
+	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
+	writel(tmp, &dev->regs->ctl);
+
+	usb_connect(dev);
+
+	return 0;
+}
+
+/* shutdown requests and disconnect from gadget */
+static void
+shutdown(struct udc *dev, struct usb_gadget_driver *driver)
+__releases(dev->lock)
+__acquires(dev->lock)
+{
+	int tmp;
+
+	if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
+		spin_unlock(&dev->lock);
+		driver->disconnect(&dev->gadget);
+		spin_lock(&dev->lock);
+	}
+
+	/* empty queues and init hardware */
+	udc_basic_init(dev);
+	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
+		empty_req_queue(&dev->ep[tmp]);
+
+	udc_setup_endpoints(dev);
+}
+
+/* Called by gadget driver to unregister itself */
+static int amd5536_stop(struct usb_gadget_driver *driver)
+{
+	struct udc	*dev = udc;
+	unsigned long	flags;
+	u32 tmp;
+
+	if (!dev)
+		return -ENODEV;
+	if (!driver || driver != dev->driver || !driver->unbind)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	udc_mask_unused_interrupts(dev);
+	shutdown(dev, driver);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	driver->unbind(&dev->gadget);
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+
+	/* set SD */
+	tmp = readl(&dev->regs->ctl);
+	tmp |= AMD_BIT(UDC_DEVCTL_SD);
+	writel(tmp, &dev->regs->ctl);
+
+
+	DBG(dev, "%s: unregistered\n", driver->driver.name);
+
+	return 0;
+}
+
+/* Clear pending NAK bits */
+static void udc_process_cnak_queue(struct udc *dev)
+{
+	u32 tmp;
+	u32 reg;
+
+	/* check epin's */
+	DBG(dev, "CNAK pending queue processing\n");
+	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
+		if (cnak_pending & (1 << tmp)) {
+			DBG(dev, "CNAK pending for ep%d\n", tmp);
+			/* clear NAK by writing CNAK */
+			reg = readl(&dev->ep[tmp].regs->ctl);
+			reg |= AMD_BIT(UDC_EPCTL_CNAK);
+			writel(reg, &dev->ep[tmp].regs->ctl);
+			dev->ep[tmp].naking = 0;
+			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
+		}
+	}
+	/* ...	and ep0out */
+	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
+		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
+		/* clear NAK by writing CNAK */
+		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
+		reg |= AMD_BIT(UDC_EPCTL_CNAK);
+		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
+		dev->ep[UDC_EP0OUT_IX].naking = 0;
+		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
+				dev->ep[UDC_EP0OUT_IX].num);
+	}
+}
+
+/* Enabling RX DMA after setup packet */
+static void udc_ep0_set_rde(struct udc *dev)
+{
+	if (use_dma) {
+		/*
+		 * only enable RXDMA when no data endpoint enabled
+		 * or data is queued
+		 */
+		if (!dev->data_ep_enabled || dev->data_ep_queued) {
+			udc_set_rde(dev);
+		} else {
+			/*
+			 * set up a timer for enabling RDE (to not enable
+			 * RXFIFO DMA for data endpoints too early)
+			 */
+			if (set_rde != 0 && !timer_pending(&udc_timer)) {
+				udc_timer.expires =
+					jiffies + HZ/UDC_RDE_TIMER_DIV;
+				set_rde = 1;
+				if (!stop_timer)
+					add_timer(&udc_timer);
+			}
+		}
+	}
+}
+
+
+/* Interrupt handler for data OUT traffic */
+static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
+{
+	irqreturn_t		ret_val = IRQ_NONE;
+	u32			tmp;
+	struct udc_ep		*ep;
+	struct udc_request	*req;
+	unsigned int		count;
+	struct udc_data_dma	*td = NULL;
+	unsigned		dma_done;
+
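+	/*
+	 * handle BNA and HE error events first, then either read the rxfifo
+	 * (FIFO mode) or complete the finished DMA descriptor(s) and re-arm
+	 * RX DMA for the next queued request
+	 */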
+	VDBG(dev, "ep%d irq\n", ep_ix);
+	ep = &dev->ep[ep_ix];
+
+	tmp = readl(&ep->regs->sts);
+	if (use_dma) {
+		/* BNA event ? */
+		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
+			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
+					ep->num, readl(&ep->regs->desptr));
+			/* clear BNA */
+			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
+			if (!ep->cancel_transfer)
+				ep->bna_occurred = 1;
+			else
+				ep->cancel_transfer = 0;
+			ret_val = IRQ_HANDLED;
+			goto finished;
+		}
+	}
+	/* HE event ? */
+	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
+		dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
+
+		/* clear HE */
+		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
+		ret_val = IRQ_HANDLED;
+		goto finished;
+	}
+
+	if (!list_empty(&ep->queue)) {
+
+		/* next request */
+		req = list_entry(ep->queue.next,
+			struct udc_request, queue);
+	} else {
+		req = NULL;
+		udc_rxfifo_pending = 1;
+	}
+	VDBG(dev, "req = %p\n", req);
+	/* fifo mode */
+	if (!use_dma) {
+
+		/* read fifo */
+		if (req && udc_rxfifo_read(ep, req)) {
+			ret_val = IRQ_HANDLED;
+
+			/* finish */
+			complete_req(ep, req, 0);
+			/* next request */
+			if (!list_empty(&ep->queue) && !ep->halted) {
+				req = list_entry(ep->queue.next,
+					struct udc_request, queue);
+			} else
+				req = NULL;
+		}
+
+	/* DMA */
+	} else if (!ep->cancel_transfer && req != NULL) {
+		ret_val = IRQ_HANDLED;
+
+		/* check for DMA done */
+		if (!use_dma_ppb) {
+			dma_done = AMD_GETBITS(req->td_data->status,
+						UDC_DMA_OUT_STS_BS);
+		/* packet per buffer mode - rx bytes */
+		} else {
+			/*
+			 * if BNA occurred then recover desc. from
+			 * BNA dummy desc.
+			 */
+			if (ep->bna_occurred) {
+				VDBG(dev, "Recover desc. from BNA dummy\n");
+				memcpy(req->td_data, ep->bna_dummy_req->td_data,
+						sizeof(struct udc_data_dma));
+				ep->bna_occurred = 0;
+				udc_init_bna_dummy(ep->req);
+			}
+			td = udc_get_last_dma_desc(req);
+			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
+		}
+		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
+			/* buffer fill mode - rx bytes */
+			if (!use_dma_ppb) {
+				/* number of bytes received */
+				count = AMD_GETBITS(req->td_data->status,
+						UDC_DMA_OUT_STS_RXBYTES);
+				VDBG(dev, "rx bytes=%u\n", count);
+			/* packet per buffer mode - rx bytes */
+			} else {
+				VDBG(dev, "req->td_data=%p\n", req->td_data);
+				VDBG(dev, "last desc = %p\n", td);
+				/* number of bytes received */
+				if (use_dma_ppb_du) {
+					/* every desc. counts bytes */
+					count = udc_get_ppbdu_rxbytes(req);
+				} else {
+					/* last desc. counts bytes */
+					count = AMD_GETBITS(td->status,
+						UDC_DMA_OUT_STS_RXBYTES);
+					if (!count && req->req.length
+						== UDC_DMA_MAXPACKET) {
+						/*
+						 * on 64k packets the RXBYTES
+						 * field is zero
+						 */
+						count = UDC_DMA_MAXPACKET;
+					}
+				}
+				VDBG(dev, "last desc rx bytes=%u\n", count);
+			}
+
+			tmp = req->req.length - req->req.actual;
+			if (count > tmp) {
+				if ((tmp % ep->ep.maxpacket) != 0) {
+					DBG(dev, "%s: rx %db, space=%db\n",
+						ep->ep.name, count, tmp);
+					req->req.status = -EOVERFLOW;
+				}
+				count = tmp;
+			}
+			req->req.actual += count;
+			req->dma_going = 0;
+			/* complete request */
+			complete_req(ep, req, 0);
+
+			/* next request */
+			if (!list_empty(&ep->queue) && !ep->halted) {
+				req = list_entry(ep->queue.next,
+					struct udc_request,
+					queue);
+				/*
+				 * DMA may already be started by udc_queue(),
+				 * called from the gadget driver's completion
+				 * routine. This happens when the queue
+				 * holds only one request.
+				 */
+				if (req->dma_going == 0) {
+					/* next dma */
+					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
+						goto finished;
+					/* write desc pointer */
+					writel(req->td_phys,
+						&ep->regs->desptr);
+					req->dma_going = 1;
+					/* enable DMA */
+					udc_set_rde(dev);
+				}
+			} else {
+				/*
+				 * implant BNA dummy descriptor to allow
+				 * RXFIFO opening by RDE
+				 */
+				if (ep->bna_dummy_req) {
+					/* write desc pointer */
+					writel(ep->bna_dummy_req->td_phys,
+						&ep->regs->desptr);
+					ep->bna_occurred = 0;
+				}
+
+				/*
+				 * schedule a timer for setting RDE if the
+				 * queue remains empty, to allow ep0 packets
+				 * to pass through
+				 */
+				if (set_rde != 0
+						&& !timer_pending(&udc_timer)) {
+					udc_timer.expires =
+						jiffies
+						+ HZ*UDC_RDE_TIMER_SECONDS;
+					set_rde = 1;
+					if (!stop_timer)
+						add_timer(&udc_timer);
+				}
+				if (ep->num != UDC_EP0OUT_IX)
+					dev->data_ep_queued = 0;
+			}
+
+		} else {
+			/*
+			 * RX DMA must be reenabled for each desc in PPBDU mode
+			 * and must be enabled for PPBNDU mode in case of BNA
+			 */
+			udc_set_rde(dev);
+		}
+
+	} else if (ep->cancel_transfer) {
+		ret_val = IRQ_HANDLED;
+		ep->cancel_transfer = 0;
+	}
+
+	/* check pending CNAKS */
+	if (cnak_pending) {
+		/* CNAK processing only when the rxfifo is empty */
+		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
+			udc_process_cnak_queue(dev);
+	}
+
+	/* clear OUT bits in ep status */
+	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
+finished:
+	return ret_val;
+}
+
+/* Interrupt handler for data IN traffic */
+static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
+{
+	irqreturn_t ret_val = IRQ_NONE;
+	u32 tmp;
+	u32 epsts;
+	struct udc_ep *ep;
+	struct udc_request *req;
+	struct udc_data_dma *td;
+	unsigned dma_done;
+	unsigned len;
+
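+	/*
+	 * handle BNA and HE error events first, then TDC (DMA transfer
+	 * complete) and finally the IN token event, which either fills the
+	 * txfifo (FIFO mode) or starts DMA by setting the poll demand bit
+	 */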
+	ep = &dev->ep[ep_ix];
+
+	epsts = readl(&ep->regs->sts);
+	if (use_dma) {
+		/* BNA ? */
+		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
+			dev_err(&dev->pdev->dev,
+				"BNA ep%din occurred - DESPTR = %08lx\n",
+				ep->num,
+				(unsigned long) readl(&ep->regs->desptr));
+
+			/* clear BNA */
+			writel(epsts, &ep->regs->sts);
+			ret_val = IRQ_HANDLED;
+			goto finished;
+		}
+	}
+	/* HE event ? */
+	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
+		dev_err(&dev->pdev->dev,
+			"HE ep%dn occurred - DESPTR = %08lx\n",
+			ep->num, (unsigned long) readl(&ep->regs->desptr));
+
+		/* clear HE */
+		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
+		ret_val = IRQ_HANDLED;
+		goto finished;
+	}
+
+	/* DMA completion */
+	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
+		VDBG(dev, "TDC set- completion\n");
+		ret_val = IRQ_HANDLED;
+		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
+			req = list_entry(ep->queue.next,
+					struct udc_request, queue);
+			/*
+			 * req.length bytes were transferred;
+			 * check DMA done of the last desc. in PPBDU mode
+			 */
+			if (use_dma_ppb_du) {
+				td = udc_get_last_dma_desc(req);
+				if (td) {
+					dma_done =
+						AMD_GETBITS(td->status,
+						UDC_DMA_IN_STS_BS);
+					/* the DMA done state is not evaluated */
+					req->req.actual = req->req.length;
+				}
+			} else {
+				/* assume all bytes transferred */
+				req->req.actual = req->req.length;
+			}
+
+			if (req->req.actual == req->req.length) {
+				/* complete req */
+				complete_req(ep, req, 0);
+				req->dma_going = 0;
+				/* further request available ? */
+				if (list_empty(&ep->queue)) {
+					/* disable interrupt */
+					tmp = readl(&dev->regs->ep_irqmsk);
+					tmp |= AMD_BIT(ep->num);
+					writel(tmp, &dev->regs->ep_irqmsk);
+				}
+			}
+		}
+		ep->cancel_transfer = 0;
+
+	}
+	/*
+	 * status reg has the IN bit set and TDC not set: if TDC was handled,
+	 * IN must not be handled as well (UDC defect?)
+	 */
+	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
+			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
+		ret_val = IRQ_HANDLED;
+		if (!list_empty(&ep->queue)) {
+			/* next request */
+			req = list_entry(ep->queue.next,
+					struct udc_request, queue);
+			/* FIFO mode */
+			if (!use_dma) {
+				/* write fifo */
+				udc_txfifo_write(ep, &req->req);
+				len = req->req.length - req->req.actual;
+				if (len > ep->ep.maxpacket)
+					len = ep->ep.maxpacket;
+				req->req.actual += len;
+				if (req->req.actual == req->req.length
+					|| (len != ep->ep.maxpacket)) {
+					/* complete req */
+					complete_req(ep, req, 0);
+				}
+			/* DMA */
+			} else if (req && !req->dma_going) {
+				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
+					req, req->td_data);
+				if (req->td_data) {
+
+					req->dma_going = 1;
+
+					/*
+					 * unset L bit of first desc.
+					 * for chain
+					 */
+					if (use_dma_ppb && req->req.length >
+							ep->ep.maxpacket) {
+						req->td_data->status &=
+							AMD_CLEAR_BIT(
+							UDC_DMA_IN_STS_L);
+					}
+
+					/* write desc pointer */
+					writel(req->td_phys, &ep->regs->desptr);
+
+					/* set HOST READY */
+					req->td_data->status =
+						AMD_ADDBITS(
+						req->td_data->status,
+						UDC_DMA_IN_STS_BS_HOST_READY,
+						UDC_DMA_IN_STS_BS);
+
+					/* set poll demand bit */
+					tmp = readl(&ep->regs->ctl);
+					tmp |= AMD_BIT(UDC_EPCTL_P);
+					writel(tmp, &ep->regs->ctl);
+				}
+			}
+
+		} else if (!use_dma && ep->in) {
+			/* disable interrupt */
+			tmp = readl(
+				&dev->regs->ep_irqmsk);
+			tmp |= AMD_BIT(ep->num);
+			writel(tmp,
+				&dev->regs->ep_irqmsk);
+		}
+	}
+	/* clear status bits */
+	writel(epsts, &ep->regs->sts);
+
+finished:
+	return ret_val;
+
+}
+
+/* Interrupt handler for Control OUT traffic */
+static irqreturn_t udc_control_out_isr(struct udc *dev)
+__releases(dev->lock)
+__acquires(dev->lock)
+{
+	irqreturn_t ret_val = IRQ_NONE;
+	u32 tmp;
+	int setup_supported;
+	u32 count;
+	int set = 0;
+	struct udc_ep	*ep;
+	struct udc_ep	*ep_tmp;
+
+	ep = &dev->ep[UDC_EP0OUT_IX];
+
+	/* clear irq */
+	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
+
+	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
+	/* check BNA and clear if set */
+	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
+		VDBG(dev, "ep0: BNA set\n");
+		writel(AMD_BIT(UDC_EPSTS_BNA),
+			&dev->ep[UDC_EP0OUT_IX].regs->sts);
+		ep->bna_occurred = 1;
+		ret_val = IRQ_HANDLED;
+		goto finished;
+	}
+
+	/* type of data: SETUP or DATA 0 bytes */
+	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
+	VDBG(dev, "data_typ = %x\n", tmp);
+
+	/* setup data */
+	if (tmp == UDC_EPSTS_OUT_SETUP) {
+		ret_val = IRQ_HANDLED;
+
+		ep->dev->stall_ep0in = 0;
+		dev->waiting_zlp_ack_ep0in = 0;
+
+		/* set NAK for EP0_IN */
+		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
+		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+		dev->ep[UDC_EP0IN_IX].naking = 1;
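+		/*
+		 * setup_data is a union: the two dwords read below hold the
+		 * raw 8 byte SETUP packet, overlaid by a struct
+		 * usb_ctrlrequest (setup_data.request)
+		 */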
+		/* get setup data */
+		if (use_dma) {
+
+			/* clear OUT bits in ep status */
+			writel(UDC_EPSTS_OUT_CLEAR,
+				&dev->ep[UDC_EP0OUT_IX].regs->sts);
+
+			setup_data.data[0] =
+				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
+			setup_data.data[1] =
+				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
+			/* set HOST READY */
+			dev->ep[UDC_EP0OUT_IX].td_stp->status =
+					UDC_DMA_STP_STS_BS_HOST_READY;
+		} else {
+			/* read fifo */
+			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
+		}
+
+		/* determine direction of control data */
+		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
+			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
+			/* enable RDE */
+			udc_ep0_set_rde(dev);
+			set = 0;
+		} else {
+			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
+			/*
+			 * implant BNA dummy descriptor to allow RXFIFO opening
+			 * by RDE
+			 */
+			if (ep->bna_dummy_req) {
+				/* write desc pointer */
+				writel(ep->bna_dummy_req->td_phys,
+					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
+				ep->bna_occurred = 0;
+			}
+
+			set = 1;
+			dev->ep[UDC_EP0OUT_IX].naking = 1;
+			/*
+			 * set up timer for enabling RDE (so that RXFIFO DMA
+			 * is not enabled for data too early)
+			 */
+			set_rde = 1;
+			if (!timer_pending(&udc_timer)) {
+				udc_timer.expires = jiffies +
+							HZ/UDC_RDE_TIMER_DIV;
+				if (!stop_timer)
+					add_timer(&udc_timer);
+			}
+		}
+
+		/*
+		 * mass storage reset must be processed here because
+		 * next packet may be a CLEAR_FEATURE HALT which would not
+		 * clear the stall bit when no STALL handshake was received
+		 * before (autostall can cause this)
+		 */
+		if (setup_data.data[0] == UDC_MSCRES_DWORD0
+				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
+			DBG(dev, "MSC Reset\n");
+			/*
+			 * clear stall bits
+			 * only one IN and one OUT endpoint are handled
+			 */
+			ep_tmp = &udc->ep[UDC_EPIN_IX];
+			udc_set_halt(&ep_tmp->ep, 0);
+			ep_tmp = &udc->ep[UDC_EPOUT_IX];
+			udc_set_halt(&ep_tmp->ep, 0);
+		}
+
+		/* call gadget with setup data received */
+		spin_unlock(&dev->lock);
+		setup_supported = dev->driver->setup(&dev->gadget,
+						&setup_data.request);
+		spin_lock(&dev->lock);
+
+		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+		/* ep0 in returns data (not zlp) on IN phase */
+		if (setup_supported >= 0 && setup_supported <
+				UDC_EP0IN_MAXPACKET) {
+			/* clear NAK by writing CNAK in EP0_IN */
+			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+			dev->ep[UDC_EP0IN_IX].naking = 0;
+			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
+
+		/* if unsupported request then stall */
+		} else if (setup_supported < 0) {
+			tmp |= AMD_BIT(UDC_EPCTL_S);
+			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+		} else
+			dev->waiting_zlp_ack_ep0in = 1;
+
+
+		/* clear NAK by writing CNAK in EP0_OUT */
+		if (!set) {
+			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
+			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
+			dev->ep[UDC_EP0OUT_IX].naking = 0;
+			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
+		}
+
+		if (!use_dma) {
+			/* clear OUT bits in ep status */
+			writel(UDC_EPSTS_OUT_CLEAR,
+				&dev->ep[UDC_EP0OUT_IX].regs->sts);
+		}
+
+	/* data packet 0 bytes */
+	} else if (tmp == UDC_EPSTS_OUT_DATA) {
+		/* clear OUT bits in ep status */
+		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
+
+		/* get setup data: only 0 packet */
+		if (use_dma) {
+			/* no req if 0 packet, just reactivate */
+			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
+				VDBG(dev, "ZLP\n");
+
+				/* set HOST READY */
+				dev->ep[UDC_EP0OUT_IX].td->status =
+					AMD_ADDBITS(
+					dev->ep[UDC_EP0OUT_IX].td->status,
+					UDC_DMA_OUT_STS_BS_HOST_READY,
+					UDC_DMA_OUT_STS_BS);
+				/* enable RDE */
+				udc_ep0_set_rde(dev);
+				ret_val = IRQ_HANDLED;
+
+			} else {
+				/* control write */
+				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
+				/* re-program desc. pointer for possible ZLPs */
+				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
+					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
+				/* enable RDE */
+				udc_ep0_set_rde(dev);
+			}
+		} else {
+
+			/* number of bytes received */
+			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
+			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
+			/* OUT data in FIFO mode not working, so force count to 0 */
+			count = 0;
+
+			/* 0 packet or real data ? */
+			if (count != 0) {
+				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
+			} else {
+				/* dummy read confirm */
+				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
+				ret_val = IRQ_HANDLED;
+			}
+		}
+	}
+
+	/* check pending CNAKS */
+	if (cnak_pending) {
+		/* CNAK processing only when rxfifo is empty */
+		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
+			udc_process_cnak_queue(dev);
+	}
+
+finished:
+	return ret_val;
+}
+
+/* Interrupt handler for Control IN traffic */
+static irqreturn_t udc_control_in_isr(struct udc *dev)
+{
+	irqreturn_t ret_val = IRQ_NONE;
+	u32 tmp;
+	struct udc_ep *ep;
+	struct udc_request *req;
+	unsigned len;
+
+	ep = &dev->ep[UDC_EP0IN_IX];
+
+	/* clear irq */
+	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
+
+	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
+	/* DMA completion */
+	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
+		VDBG(dev, "isr: TDC clear\n");
+		ret_val = IRQ_HANDLED;
+
+		/* clear TDC bit */
+		writel(AMD_BIT(UDC_EPSTS_TDC),
+				&dev->ep[UDC_EP0IN_IX].regs->sts);
+
+	/* status reg has IN bit set ? */
+	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
+		ret_val = IRQ_HANDLED;
+
+		if (ep->dma) {
+			/* clear IN bit */
+			writel(AMD_BIT(UDC_EPSTS_IN),
+				&dev->ep[UDC_EP0IN_IX].regs->sts);
+		}
+		if (dev->stall_ep0in) {
+			DBG(dev, "stall ep0in\n");
+			/* halt ep0in */
+			tmp = readl(&ep->regs->ctl);
+			tmp |= AMD_BIT(UDC_EPCTL_S);
+			writel(tmp, &ep->regs->ctl);
+		} else {
+			if (!list_empty(&ep->queue)) {
+				/* next request */
+				req = list_entry(ep->queue.next,
+						struct udc_request, queue);
+
+				if (ep->dma) {
+					/* write desc pointer */
+					writel(req->td_phys, &ep->regs->desptr);
+					/* set HOST READY */
+					req->td_data->status =
+						AMD_ADDBITS(
+						req->td_data->status,
+						UDC_DMA_STP_STS_BS_HOST_READY,
+						UDC_DMA_STP_STS_BS);
+
+					/* set poll demand bit */
+					tmp =
+					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+					tmp |= AMD_BIT(UDC_EPCTL_P);
+					writel(tmp,
+					&dev->ep[UDC_EP0IN_IX].regs->ctl);
+
+					/* all bytes will be transferred */
+					req->req.actual = req->req.length;
+
+					/* complete req */
+					complete_req(ep, req, 0);
+
+				} else {
+					/* write fifo */
+					udc_txfifo_write(ep, &req->req);
+
+					/* number of bytes transferred */
+					len = req->req.length - req->req.actual;
+					if (len > ep->ep.maxpacket)
+						len = ep->ep.maxpacket;
+
+					req->req.actual += len;
+					if (req->req.actual == req->req.length
+						|| (len != ep->ep.maxpacket)) {
+						/* complete req */
+						complete_req(ep, req, 0);
+					}
+				}
+
+			}
+		}
+		ep->halted = 0;
+		dev->stall_ep0in = 0;
+		if (!ep->dma) {
+			/* clear IN bit */
+			writel(AMD_BIT(UDC_EPSTS_IN),
+				&dev->ep[UDC_EP0IN_IX].regs->sts);
+		}
+	}
+
+	return ret_val;
+}
+
+
+/* Interrupt handler for global device events */
+static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
+__releases(dev->lock)
+__acquires(dev->lock)
+{
+	irqreturn_t ret_val = IRQ_NONE;
+	u32 tmp;
+	u32 cfg;
+	struct udc_ep *ep;
+	u16 i;
+	u8 udc_csr_epix;
+
+	/* SET_CONFIG irq ? */
+	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
+		ret_val = IRQ_HANDLED;
+
+		/* read config value */
+		tmp = readl(&dev->regs->sts);
+		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
+		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
+		dev->cur_config = cfg;
+		dev->set_cfg_not_acked = 1;
+
+		/* make usb request for gadget driver */
+		memset(&setup_data, 0 , sizeof(union udc_setup_data));
+		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
+		setup_data.request.wValue = cpu_to_le16(dev->cur_config);
+
+		/* program the NE registers */
+		for (i = 0; i < UDC_EP_NUM; i++) {
+			ep = &dev->ep[i];
+			if (ep->in) {
+
+				/* ep ix in UDC CSR register space */
+				udc_csr_epix = ep->num;
+
+
+			/* OUT ep */
+			} else {
+				/* ep ix in UDC CSR register space */
+				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
+			}
+
+			tmp = readl(&dev->csr->ne[udc_csr_epix]);
+			/* ep cfg */
+			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
+						UDC_CSR_NE_CFG);
+			/* write reg */
+			writel(tmp, &dev->csr->ne[udc_csr_epix]);
+
+			/* clear stall bits */
+			ep->halted = 0;
+			tmp = readl(&ep->regs->ctl);
+			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
+			writel(tmp, &ep->regs->ctl);
+		}
+		/* call gadget zero with setup data received */
+		spin_unlock(&dev->lock);
+		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
+		spin_lock(&dev->lock);
+
+	} /* SET_INTERFACE ? */
+	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
+		ret_val = IRQ_HANDLED;
+
+		dev->set_cfg_not_acked = 1;
+		/* read interface and alt setting values */
+		tmp = readl(&dev->regs->sts);
+		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
+		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
+
+		/* make usb request for gadget driver */
+		memset(&setup_data, 0 , sizeof(union udc_setup_data));
+		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
+		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
+		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
+		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
+
+		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
+				dev->cur_alt, dev->cur_intf);
+
+		/* program the NE registers */
+		for (i = 0; i < UDC_EP_NUM; i++) {
+			ep = &dev->ep[i];
+			if (ep->in) {
+
+				/* ep ix in UDC CSR register space */
+				udc_csr_epix = ep->num;
+
+
+			/* OUT ep */
+			} else {
+				/* ep ix in UDC CSR register space */
+				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
+			}
+
+			/* UDC CSR reg */
+			/* set ep values */
+			tmp = readl(&dev->csr->ne[udc_csr_epix]);
+			/* ep interface */
+			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
+						UDC_CSR_NE_INTF);
+			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
+			/* ep alt */
+			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
+						UDC_CSR_NE_ALT);
+			/* write reg */
+			writel(tmp, &dev->csr->ne[udc_csr_epix]);
+
+			/* clear stall bits */
+			ep->halted = 0;
+			tmp = readl(&ep->regs->ctl);
+			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
+			writel(tmp, &ep->regs->ctl);
+		}
+
+		/* call gadget zero with setup data received */
+		spin_unlock(&dev->lock);
+		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
+		spin_lock(&dev->lock);
+
+	} /* USB reset */
+	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
+		DBG(dev, "USB Reset interrupt\n");
+		ret_val = IRQ_HANDLED;
+
+		/* allow soft reset when suspend occurs */
+		soft_reset_occured = 0;
+
+		dev->waiting_zlp_ack_ep0in = 0;
+		dev->set_cfg_not_acked = 0;
+
+		/* mask not needed interrupts */
+		udc_mask_unused_interrupts(dev);
+
+		/* call gadget to resume and reset configs etc. */
+		spin_unlock(&dev->lock);
+		if (dev->sys_suspended && dev->driver->resume) {
+			dev->driver->resume(&dev->gadget);
+			dev->sys_suspended = 0;
+		}
+		dev->driver->disconnect(&dev->gadget);
+		spin_lock(&dev->lock);
+
+		/* disable ep0 to empty req queue */
+		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
+		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
+
+		/* soft reset when rxfifo not empty */
+		tmp = readl(&dev->regs->sts);
+		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
+				&& !soft_reset_after_usbreset_occured) {
+			udc_soft_reset(dev);
+			soft_reset_after_usbreset_occured++;
+		}
+
+		/*
+		 * DMA reset to kill potential old DMA hw hang,
+		 * POLL bit is already reset by ep_init() through
+		 * disconnect()
+		 */
+		DBG(dev, "DMA machine reset\n");
+		tmp = readl(&dev->regs->cfg);
+		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
+		writel(tmp, &dev->regs->cfg);
+
+		/* put into initial config */
+		udc_basic_init(dev);
+
+		/* enable device setup interrupts */
+		udc_enable_dev_setup_interrupts(dev);
+
+		/* enable suspend interrupt */
+		tmp = readl(&dev->regs->irqmsk);
+		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
+		writel(tmp, &dev->regs->irqmsk);
+
+	} /* USB suspend */
+	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
+		DBG(dev, "USB Suspend interrupt\n");
+		ret_val = IRQ_HANDLED;
+		if (dev->driver->suspend) {
+			spin_unlock(&dev->lock);
+			dev->sys_suspended = 1;
+			dev->driver->suspend(&dev->gadget);
+			spin_lock(&dev->lock);
+		}
+	} /* new speed ? */
+	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
+		DBG(dev, "ENUM interrupt\n");
+		ret_val = IRQ_HANDLED;
+		soft_reset_after_usbreset_occured = 0;
+
+		/* disable ep0 to empty req queue */
+		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
+		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
+
+		/* link up all endpoints */
+		udc_setup_endpoints(dev);
+		dev_info(&dev->pdev->dev, "Connect: %s\n",
+			 usb_speed_string(dev->gadget.speed));
+
+		/* init ep 0 */
+		activate_control_endpoints(dev);
+
+		/* enable ep0 interrupts */
+		udc_enable_ep0_interrupts(dev);
+	}
+	/* session valid change interrupt */
+	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
+		DBG(dev, "USB SVC interrupt\n");
+		ret_val = IRQ_HANDLED;
+
+		/* check that session is not valid to detect disconnect */
+		tmp = readl(&dev->regs->sts);
+		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
+			/* disable suspend interrupt */
+			tmp = readl(&dev->regs->irqmsk);
+			tmp |= AMD_BIT(UDC_DEVINT_US);
+			writel(tmp, &dev->regs->irqmsk);
+			DBG(dev, "USB Disconnect (session valid low)\n");
+			/* cleanup on disconnect */
+			usb_disconnect(udc);
+		}
+
+	}
+
+	return ret_val;
+}
+
+/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
+static irqreturn_t udc_irq(int irq, void *pdev)
+{
+	struct udc *dev = pdev;
+	u32 reg;
+	u16 i;
+	u32 ep_irq;
+	irqreturn_t ret_val = IRQ_NONE;
+
+	spin_lock(&dev->lock);
+
+	/* check for ep irq */
+	reg = readl(&dev->regs->ep_irqsts);
+	if (reg) {
+		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
+			ret_val |= udc_control_out_isr(dev);
+		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
+			ret_val |= udc_control_in_isr(dev);
+
+		/*
+		 * data endpoints: iterate the remaining EPs; IN EP irqs sit in
+		 * bits 1..15, OUT EP irqs in bits 17..31 (EP0 handled above)
+		 */
+		for (i = 1; i < UDC_EP_NUM; i++) {
+			ep_irq = 1 << i;
+			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
+				continue;
+
+			/* clear irq status */
+			writel(ep_irq, &dev->regs->ep_irqsts);
+
+			/* irq for out ep ? */
+			if (i > UDC_EPIN_NUM)
+				ret_val |= udc_data_out_isr(dev, i);
+			else
+				ret_val |= udc_data_in_isr(dev, i);
+		}
+
+	}
+
+
+	/* check for dev irq */
+	reg = readl(&dev->regs->irqsts);
+	if (reg) {
+		/* clear irq */
+		writel(reg, &dev->regs->irqsts);
+		ret_val |= udc_dev_isr(dev, reg);
+	}
+
+
+	spin_unlock(&dev->lock);
+	return ret_val;
+}
+
+/* Tears down device */
+static void gadget_release(struct device *pdev)
+{
+	struct amd5536udc *dev = dev_get_drvdata(pdev);
+	kfree(dev);
+}
+
+/* Cleanup on device remove */
+static void udc_remove(struct udc *dev)
+{
+	/* remove timer */
+	stop_timer++;
+	if (timer_pending(&udc_timer))
+		wait_for_completion(&on_exit);
+	if (udc_timer.data)
+		del_timer_sync(&udc_timer);
+	/* remove pollstall timer */
+	stop_pollstall_timer++;
+	if (timer_pending(&udc_pollstall_timer))
+		wait_for_completion(&on_pollstall_exit);
+	if (udc_pollstall_timer.data)
+		del_timer_sync(&udc_pollstall_timer);
+	udc = NULL;
+}
+
+/* Reset all pci context */
+static void udc_pci_remove(struct pci_dev *pdev)
+{
+	struct udc		*dev;
+
+	dev = pci_get_drvdata(pdev);
+
+	usb_del_gadget_udc(&udc->gadget);
+	/* gadget driver must not be registered */
+	BUG_ON(dev->driver != NULL);
+
+	/* dma pool cleanup */
+	if (dev->data_requests)
+		pci_pool_destroy(dev->data_requests);
+
+	if (dev->stp_requests) {
+		/* cleanup DMA desc's for ep0in */
+		pci_pool_free(dev->stp_requests,
+			dev->ep[UDC_EP0OUT_IX].td_stp,
+			dev->ep[UDC_EP0OUT_IX].td_stp_dma);
+		pci_pool_free(dev->stp_requests,
+			dev->ep[UDC_EP0OUT_IX].td,
+			dev->ep[UDC_EP0OUT_IX].td_phys);
+
+		pci_pool_destroy(dev->stp_requests);
+	}
+
+	/* reset controller */
+	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
+	if (dev->irq_registered)
+		free_irq(pdev->irq, dev);
+	if (dev->regs)
+		iounmap(dev->regs);
+	if (dev->mem_region)
+		release_mem_region(pci_resource_start(pdev, 0),
+				pci_resource_len(pdev, 0));
+	if (dev->active)
+		pci_disable_device(pdev);
+
+	device_unregister(&dev->gadget.dev);
+	pci_set_drvdata(pdev, NULL);
+
+	udc_remove(dev);
+}
+
+/* create dma pools on init */
+static int init_dma_pools(struct udc *dev)
+{
+	struct udc_stp_dma	*td_stp;
+	struct udc_data_dma	*td_data;
+	int retval;
+
+	/* consistent DMA mode setting ? */
+	if (use_dma_ppb) {
+		use_dma_bufferfill_mode = 0;
+	} else {
+		use_dma_ppb_du = 0;
+		use_dma_bufferfill_mode = 1;
+	}
+
+	/* DMA setup */
+	dev->data_requests = dma_pool_create("data_requests", NULL,
+		sizeof(struct udc_data_dma), 0, 0);
+	if (!dev->data_requests) {
+		DBG(dev, "can't get request data pool\n");
+		retval = -ENOMEM;
+		goto finished;
+	}
+
+	/* EP0 in dma regs = dev control regs */
+	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
+
+	/* dma desc for setup data */
+	dev->stp_requests = dma_pool_create("setup requests", NULL,
+		sizeof(struct udc_stp_dma), 0, 0);
+	if (!dev->stp_requests) {
+		DBG(dev, "can't get stp request pool\n");
+		retval = -ENOMEM;
+		goto finished;
+	}
+	/* setup */
+	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
+				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
+	if (td_stp == NULL) {
+		retval = -ENOMEM;
+		goto finished;
+	}
+	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
+
+	/* data: 0 packets !? */
+	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
+				&dev->ep[UDC_EP0OUT_IX].td_phys);
+	if (td_data == NULL) {
+		retval = -ENOMEM;
+		goto finished;
+	}
+	dev->ep[UDC_EP0OUT_IX].td = td_data;
+	return 0;
+
+finished:
+	return retval;
+}
+
+/* Called by pci bus driver to init pci context */
+static int udc_pci_probe(
+	struct pci_dev *pdev,
+	const struct pci_device_id *id
+)
+{
+	struct udc		*dev;
+	unsigned long		resource;
+	unsigned long		len;
+	int			retval = 0;
+
+	/* one udc only */
+	if (udc) {
+		dev_dbg(&pdev->dev, "already probed\n");
+		return -EBUSY;
+	}
+
+	/* init */
+	dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
+	if (!dev) {
+		retval = -ENOMEM;
+		goto finished;
+	}
+
+	/* pci setup */
+	if (pci_enable_device(pdev) < 0) {
+		kfree(dev);
+		dev = NULL;
+		retval = -ENODEV;
+		goto finished;
+	}
+	dev->active = 1;
+
+	/* PCI resource allocation */
+	resource = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+
+	if (!request_mem_region(resource, len, name)) {
+		dev_dbg(&pdev->dev, "pci device used already\n");
+		kfree(dev);
+		dev = NULL;
+		retval = -EBUSY;
+		goto finished;
+	}
+	dev->mem_region = 1;
+
+	dev->virt_addr = ioremap_nocache(resource, len);
+	if (dev->virt_addr == NULL) {
+		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
+		kfree(dev);
+		dev = NULL;
+		retval = -EFAULT;
+		goto finished;
+	}
+
+	if (!pdev->irq) {
+		dev_err(&dev->pdev->dev, "irq not set\n");
+		kfree(dev);
+		dev = NULL;
+		retval = -ENODEV;
+		goto finished;
+	}
+
+	spin_lock_init(&dev->lock);
+	/* udc csr registers base */
+	dev->csr = dev->virt_addr + UDC_CSR_ADDR;
+	/* dev registers base */
+	dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
+	/* ep registers base */
+	dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
+	/* fifo's base */
+	dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
+	dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
+
+	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
+		dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
+		kfree(dev);
+		dev = NULL;
+		retval = -EBUSY;
+		goto finished;
+	}
+	dev->irq_registered = 1;
+
+	pci_set_drvdata(pdev, dev);
+
+	/* chip revision for Hs AMD5536 */
+	dev->chiprev = pdev->revision;
+
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+
+	/* init dma pools */
+	if (use_dma) {
+		retval = init_dma_pools(dev);
+		if (retval != 0)
+			goto finished;
+	}
+
+	dev->phys_addr = resource;
+	dev->irq = pdev->irq;
+	dev->pdev = pdev;
+	dev->gadget.dev.parent = &pdev->dev;
+	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+
+	/* general probing */
+	if (udc_probe(dev) == 0)
+		return 0;
+
+finished:
+	if (dev)
+		udc_pci_remove(pdev);
+	return retval;
+}
+
+/* general probe */
+static int udc_probe(struct udc *dev)
+{
+	char		tmp[128];
+	u32		reg;
+	int		retval;
+
+	/* mark timer as not initialized */
+	udc_timer.data = 0;
+	udc_pollstall_timer.data = 0;
+
+	/* device struct setup */
+	dev->gadget.ops = &udc_ops;
+
+	dev_set_name(&dev->gadget.dev, "gadget");
+	dev->gadget.dev.release = gadget_release;
+	dev->gadget.name = name;
+	dev->gadget.max_speed = USB_SPEED_HIGH;
+
+	/* init registers, interrupts, ... */
+	startup_registers(dev);
+
+	dev_info(&dev->pdev->dev, "%s\n", mod_desc);
+
+	snprintf(tmp, sizeof tmp, "%d", dev->irq);
+	dev_info(&dev->pdev->dev,
+		"irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
+		tmp, dev->phys_addr, dev->chiprev,
+		(dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
+	strcpy(tmp, UDC_DRIVER_VERSION_STRING);
+	if (dev->chiprev == UDC_HSA0_REV) {
+		dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
+		retval = -ENODEV;
+		goto finished;
+	}
+	dev_info(&dev->pdev->dev,
+		"driver version: %s(for Geode5536 B1)\n", tmp);
+	udc = dev;
+
+	retval = usb_add_gadget_udc(&udc->pdev->dev, &dev->gadget);
+	if (retval)
+		goto finished;
+
+	retval = device_register(&dev->gadget.dev);
+	if (retval) {
+		usb_del_gadget_udc(&dev->gadget);
+		put_device(&dev->gadget.dev);
+		goto finished;
+	}
+
+	/* timer init */
+	init_timer(&udc_timer);
+	udc_timer.function = udc_timer_function;
+	udc_timer.data = 1;
+	/* timer pollstall init */
+	init_timer(&udc_pollstall_timer);
+	udc_pollstall_timer.function = udc_pollstall_timer_function;
+	udc_pollstall_timer.data = 1;
+
+	/* set SD */
+	reg = readl(&dev->regs->ctl);
+	reg |= AMD_BIT(UDC_DEVCTL_SD);
+	writel(reg, &dev->regs->ctl);
+
+	/* print dev register info */
+	print_regs(dev);
+
+	return 0;
+
+finished:
+	return retval;
+}
+
+/* Initiates a remote wakeup */
+static int udc_remote_wakeup(struct udc *dev)
+{
+	unsigned long flags;
+	u32 tmp;
+
+	DBG(dev, "UDC initiates remote wakeup\n");
+
+	spin_lock_irqsave(&dev->lock, flags);
+
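+	/* pulse the RES (resume) bit: set then clear to signal remote wakeup */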
+	tmp = readl(&dev->regs->ctl);
+	tmp |= AMD_BIT(UDC_DEVCTL_RES);
+	writel(tmp, &dev->regs->ctl);
+	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
+	writel(tmp, &dev->regs->ctl);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return 0;
+}
+
+/* PCI device parameters */
+static DEFINE_PCI_DEVICE_TABLE(pci_id) = {
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
+		.class =	(PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+		.class_mask =	0xffffffff,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(pci, pci_id);
+
+/* PCI functions */
+static struct pci_driver udc_pci_driver = {
+	.name =		(char *) name,
+	.id_table =	pci_id,
+	.probe =	udc_pci_probe,
+	.remove =	udc_pci_remove,
+};
+
+/* Inits driver */
+static int __init init(void)
+{
+	return pci_register_driver(&udc_pci_driver);
+}
+module_init(init);
+
+/* Cleans driver */
+static void __exit cleanup(void)
+{
+	pci_unregister_driver(&udc_pci_driver);
+}
+module_exit(cleanup);
+
+MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
+MODULE_AUTHOR("Thomas Dahlmann");
+MODULE_LICENSE("GPL");
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/amd5536udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/amd5536udc.h
new file mode 100644
index 0000000..f87e29c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/amd5536udc.h
@@ -0,0 +1,617 @@
+/*
+ * amd5536.h -- header for AMD 5536 UDC high/full speed USB device controller
+ *
+ * Copyright (C) 2007 AMD (http://www.amd.com)
+ * Author: Thomas Dahlmann
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AMD5536UDC_H
+#define AMD5536UDC_H
+
+/* various constants */
+#define UDC_RDE_TIMER_SECONDS		1
+#define UDC_RDE_TIMER_DIV		10
+#define UDC_POLLSTALL_TIMER_USECONDS	500
+
+/* Hs AMD5536 chip rev. */
+#define UDC_HSA0_REV 1
+#define UDC_HSB1_REV 2
+
+/*
+ * SETUP usb commands
+ * needed because some SETUPs are handled in hw, but must still be passed to
+ * the gadget driver above
+ * SET_CONFIG
+ */
+#define UDC_SETCONFIG_DWORD0			0x00000900
+#define UDC_SETCONFIG_DWORD0_VALUE_MASK		0xffff0000
+#define UDC_SETCONFIG_DWORD0_VALUE_OFS		16
+
+#define UDC_SETCONFIG_DWORD1			0x00000000
+
+/* SET_INTERFACE */
+#define UDC_SETINTF_DWORD0			0x00000b00
+#define UDC_SETINTF_DWORD0_ALT_MASK		0xffff0000
+#define UDC_SETINTF_DWORD0_ALT_OFS		16
+
+#define UDC_SETINTF_DWORD1			0x00000000
+#define UDC_SETINTF_DWORD1_INTF_MASK		0x0000ffff
+#define UDC_SETINTF_DWORD1_INTF_OFS		0
+
+/* Mass storage reset */
+#define UDC_MSCRES_DWORD0			0x0000ff21
+#define UDC_MSCRES_DWORD1			0x00000000
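+/*
+ * These two dwords hold the raw 8-byte SETUP packet of a Bulk-Only Transport
+ * Mass Storage Reset (bmRequestType 0x21, bRequest 0xff, remaining fields 0);
+ * udc_control_out_isr() compares them against setup_data.data[].
+ */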
+
+/* Global CSR's -------------------------------------------------------------*/
+#define UDC_CSR_ADDR				0x500
+
+/* EP NE bits */
+/* EP number */
+#define UDC_CSR_NE_NUM_MASK			0x0000000f
+#define UDC_CSR_NE_NUM_OFS			0
+/* EP direction */
+#define UDC_CSR_NE_DIR_MASK			0x00000010
+#define UDC_CSR_NE_DIR_OFS			4
+/* EP type */
+#define UDC_CSR_NE_TYPE_MASK			0x00000060
+#define UDC_CSR_NE_TYPE_OFS			5
+/* EP config number */
+#define UDC_CSR_NE_CFG_MASK			0x00000780
+#define UDC_CSR_NE_CFG_OFS			7
+/* EP interface number */
+#define UDC_CSR_NE_INTF_MASK			0x00007800
+#define UDC_CSR_NE_INTF_OFS			11
+/* EP alt setting */
+#define UDC_CSR_NE_ALT_MASK			0x00078000
+#define UDC_CSR_NE_ALT_OFS			15
+
+/* max pkt */
+#define UDC_CSR_NE_MAX_PKT_MASK			0x3ff80000
+#define UDC_CSR_NE_MAX_PKT_OFS			19
+
+/* Device Config Register ---------------------------------------------------*/
+#define UDC_DEVCFG_ADDR				0x400
+
+#define UDC_DEVCFG_SOFTRESET			31
+#define UDC_DEVCFG_HNPSFEN			30
+#define UDC_DEVCFG_DMARST			29
+#define UDC_DEVCFG_SET_DESC			18
+#define UDC_DEVCFG_CSR_PRG			17
+#define UDC_DEVCFG_STATUS			7
+#define UDC_DEVCFG_DIR				6
+#define UDC_DEVCFG_PI				5
+#define UDC_DEVCFG_SS				4
+#define UDC_DEVCFG_SP				3
+#define UDC_DEVCFG_RWKP				2
+
+#define UDC_DEVCFG_SPD_MASK			0x3
+#define UDC_DEVCFG_SPD_OFS			0
+#define UDC_DEVCFG_SPD_HS			0x0
+#define UDC_DEVCFG_SPD_FS			0x1
+#define UDC_DEVCFG_SPD_LS			0x2
+/*#define UDC_DEVCFG_SPD_FS			0x3*/
+
+
+/* Device Control Register --------------------------------------------------*/
+#define UDC_DEVCTL_ADDR				0x404
+
+#define UDC_DEVCTL_THLEN_MASK			0xff000000
+#define UDC_DEVCTL_THLEN_OFS			24
+
+#define UDC_DEVCTL_BRLEN_MASK			0x00ff0000
+#define UDC_DEVCTL_BRLEN_OFS			16
+
+#define UDC_DEVCTL_CSR_DONE			13
+#define UDC_DEVCTL_DEVNAK			12
+#define UDC_DEVCTL_SD				10
+#define UDC_DEVCTL_MODE				9
+#define UDC_DEVCTL_BREN				8
+#define UDC_DEVCTL_THE				7
+#define UDC_DEVCTL_BF				6
+#define UDC_DEVCTL_BE				5
+#define UDC_DEVCTL_DU				4
+#define UDC_DEVCTL_TDE				3
+#define UDC_DEVCTL_RDE				2
+#define UDC_DEVCTL_RES				0
+
+
+/* Device Status Register ---------------------------------------------------*/
+#define UDC_DEVSTS_ADDR				0x408
+
+#define UDC_DEVSTS_TS_MASK			0xfffc0000
+#define UDC_DEVSTS_TS_OFS			18
+
+#define UDC_DEVSTS_SESSVLD			17
+#define UDC_DEVSTS_PHY_ERROR			16
+#define UDC_DEVSTS_RXFIFO_EMPTY			15
+
+#define UDC_DEVSTS_ENUM_SPEED_MASK		0x00006000
+#define UDC_DEVSTS_ENUM_SPEED_OFS		13
+#define UDC_DEVSTS_ENUM_SPEED_FULL		1
+#define UDC_DEVSTS_ENUM_SPEED_HIGH		0
+
+#define UDC_DEVSTS_SUSP				12
+
+#define UDC_DEVSTS_ALT_MASK			0x00000f00
+#define UDC_DEVSTS_ALT_OFS			8
+
+#define UDC_DEVSTS_INTF_MASK			0x000000f0
+#define UDC_DEVSTS_INTF_OFS			4
+
+#define UDC_DEVSTS_CFG_MASK			0x0000000f
+#define UDC_DEVSTS_CFG_OFS			0
+
+
+/* Device Interrupt Register ------------------------------------------------*/
+#define UDC_DEVINT_ADDR				0x40c
+
+#define UDC_DEVINT_SVC				7
+#define UDC_DEVINT_ENUM				6
+#define UDC_DEVINT_SOF				5
+#define UDC_DEVINT_US				4
+#define UDC_DEVINT_UR				3
+#define UDC_DEVINT_ES				2
+#define UDC_DEVINT_SI				1
+#define UDC_DEVINT_SC				0
+
+/* Device Interrupt Mask Register -------------------------------------------*/
+#define UDC_DEVINT_MSK_ADDR			0x410
+
+#define UDC_DEVINT_MSK				0x7f
+
+/* Endpoint Interrupt Register ----------------------------------------------*/
+#define UDC_EPINT_ADDR				0x414
+
+#define UDC_EPINT_OUT_MASK			0xffff0000
+#define UDC_EPINT_OUT_OFS			16
+#define UDC_EPINT_IN_MASK			0x0000ffff
+#define UDC_EPINT_IN_OFS			0
+
+#define UDC_EPINT_IN_EP0			0
+#define UDC_EPINT_IN_EP1			1
+#define UDC_EPINT_IN_EP2			2
+#define UDC_EPINT_IN_EP3			3
+#define UDC_EPINT_OUT_EP0			16
+#define UDC_EPINT_OUT_EP1			17
+#define UDC_EPINT_OUT_EP2			18
+#define UDC_EPINT_OUT_EP3			19
+
+#define UDC_EPINT_EP0_ENABLE_MSK		0x001e001e
+
+/* Endpoint Interrupt Mask Register -----------------------------------------*/
+#define UDC_EPINT_MSK_ADDR			0x418
+
+#define UDC_EPINT_OUT_MSK_MASK			0xffff0000
+#define UDC_EPINT_OUT_MSK_OFS			16
+#define UDC_EPINT_IN_MSK_MASK			0x0000ffff
+#define UDC_EPINT_IN_MSK_OFS			0
+
+#define UDC_EPINT_MSK_DISABLE_ALL		0xffffffff
+/* mask non-EP0 endpoints */
+#define UDC_EPDATAINT_MSK_DISABLE		0xfffefffe
+/* mask all dev interrupts */
+#define UDC_DEV_MSK_DISABLE			0x7f
+
+/* Endpoint-specific CSR's --------------------------------------------------*/
+#define UDC_EPREGS_ADDR				0x0
+#define UDC_EPIN_REGS_ADDR			0x0
+#define UDC_EPOUT_REGS_ADDR			0x200
+
+#define UDC_EPCTL_ADDR				0x0
+
+#define UDC_EPCTL_RRDY				9
+#define UDC_EPCTL_CNAK				8
+#define UDC_EPCTL_SNAK				7
+#define UDC_EPCTL_NAK				6
+
+#define UDC_EPCTL_ET_MASK			0x00000030
+#define UDC_EPCTL_ET_OFS			4
+#define UDC_EPCTL_ET_CONTROL			0
+#define UDC_EPCTL_ET_ISO			1
+#define UDC_EPCTL_ET_BULK			2
+#define UDC_EPCTL_ET_INTERRUPT			3
+
+#define UDC_EPCTL_P				3
+#define UDC_EPCTL_SN				2
+#define UDC_EPCTL_F				1
+#define UDC_EPCTL_S				0
+
+/* Endpoint Status Registers ------------------------------------------------*/
+#define UDC_EPSTS_ADDR				0x4
+
+#define UDC_EPSTS_RX_PKT_SIZE_MASK		0x007ff800
+#define UDC_EPSTS_RX_PKT_SIZE_OFS		11
+
+#define UDC_EPSTS_TDC				10
+#define UDC_EPSTS_HE				9
+#define UDC_EPSTS_BNA				7
+#define UDC_EPSTS_IN				6
+
+#define UDC_EPSTS_OUT_MASK			0x00000030
+#define UDC_EPSTS_OUT_OFS			4
+#define UDC_EPSTS_OUT_DATA			1
+#define UDC_EPSTS_OUT_DATA_CLEAR		0x10
+#define UDC_EPSTS_OUT_SETUP			2
+#define UDC_EPSTS_OUT_SETUP_CLEAR		0x20
+#define UDC_EPSTS_OUT_CLEAR			0x30
+
+/* Endpoint Buffer Size IN/ Receive Packet Frame Number OUT Registers ------*/
+#define UDC_EPIN_BUFF_SIZE_ADDR			0x8
+#define UDC_EPOUT_FRAME_NUMBER_ADDR		0x8
+
+#define UDC_EPIN_BUFF_SIZE_MASK			0x0000ffff
+#define UDC_EPIN_BUFF_SIZE_OFS			0
+/* EP0in txfifo = 128 bytes*/
+#define UDC_EPIN0_BUFF_SIZE			32
+/* EP0in fullspeed txfifo = 128 bytes*/
+#define UDC_FS_EPIN0_BUFF_SIZE			32
+
+/* fifo size mult = fifo size / max packet */
+#define UDC_EPIN_BUFF_SIZE_MULT			2
+
+/* EPin data fifo size = 1024 bytes DOUBLE BUFFERING */
+#define UDC_EPIN_BUFF_SIZE			256
+/* EPin small INT data fifo size = 128 bytes */
+#define UDC_EPIN_SMALLINT_BUFF_SIZE		32
+
+/* EPin fullspeed data fifo size = 128 bytes DOUBLE BUFFERING */
+#define UDC_FS_EPIN_BUFF_SIZE			32
+
+#define UDC_EPOUT_FRAME_NUMBER_MASK		0x0000ffff
+#define UDC_EPOUT_FRAME_NUMBER_OFS		0
+
+/* Endpoint Buffer Size OUT/Max Packet Size Registers -----------------------*/
+#define UDC_EPOUT_BUFF_SIZE_ADDR		0x0c
+#define UDC_EP_MAX_PKT_SIZE_ADDR		0x0c
+
+#define UDC_EPOUT_BUFF_SIZE_MASK		0xffff0000
+#define UDC_EPOUT_BUFF_SIZE_OFS			16
+#define UDC_EP_MAX_PKT_SIZE_MASK		0x0000ffff
+#define UDC_EP_MAX_PKT_SIZE_OFS			0
+/* EP0in max packet size = 64 bytes */
+#define UDC_EP0IN_MAX_PKT_SIZE			64
+/* EP0out max packet size = 64 bytes */
+#define UDC_EP0OUT_MAX_PKT_SIZE			64
+/* EP0in fullspeed max packet size = 64 bytes */
+#define UDC_FS_EP0IN_MAX_PKT_SIZE		64
+/* EP0out fullspeed max packet size = 64 bytes */
+#define UDC_FS_EP0OUT_MAX_PKT_SIZE		64
+
+/*
+ * Endpoint dma descriptors ------------------------------------------------
+ *
+ * Setup data, Status dword
+ */
+#define UDC_DMA_STP_STS_CFG_MASK		0x0fff0000
+#define UDC_DMA_STP_STS_CFG_OFS			16
+#define UDC_DMA_STP_STS_CFG_ALT_MASK		0x000f0000
+#define UDC_DMA_STP_STS_CFG_ALT_OFS		16
+#define UDC_DMA_STP_STS_CFG_INTF_MASK		0x00f00000
+#define UDC_DMA_STP_STS_CFG_INTF_OFS		20
+#define UDC_DMA_STP_STS_CFG_NUM_MASK		0x0f000000
+#define UDC_DMA_STP_STS_CFG_NUM_OFS		24
+#define UDC_DMA_STP_STS_RX_MASK			0x30000000
+#define UDC_DMA_STP_STS_RX_OFS			28
+#define UDC_DMA_STP_STS_BS_MASK			0xc0000000
+#define UDC_DMA_STP_STS_BS_OFS			30
+#define UDC_DMA_STP_STS_BS_HOST_READY		0
+#define UDC_DMA_STP_STS_BS_DMA_BUSY		1
+#define UDC_DMA_STP_STS_BS_DMA_DONE		2
+#define UDC_DMA_STP_STS_BS_HOST_BUSY		3
+/* IN data, Status dword */
+#define UDC_DMA_IN_STS_TXBYTES_MASK		0x0000ffff
+#define UDC_DMA_IN_STS_TXBYTES_OFS		0
+#define	UDC_DMA_IN_STS_FRAMENUM_MASK		0x07ff0000
+#define UDC_DMA_IN_STS_FRAMENUM_OFS		0
+#define UDC_DMA_IN_STS_L			27
+#define UDC_DMA_IN_STS_TX_MASK			0x30000000
+#define UDC_DMA_IN_STS_TX_OFS			28
+#define UDC_DMA_IN_STS_BS_MASK			0xc0000000
+#define UDC_DMA_IN_STS_BS_OFS			30
+#define UDC_DMA_IN_STS_BS_HOST_READY		0
+#define UDC_DMA_IN_STS_BS_DMA_BUSY		1
+#define UDC_DMA_IN_STS_BS_DMA_DONE		2
+#define UDC_DMA_IN_STS_BS_HOST_BUSY		3
+/* OUT data, Status dword */
+#define UDC_DMA_OUT_STS_RXBYTES_MASK		0x0000ffff
+#define UDC_DMA_OUT_STS_RXBYTES_OFS		0
+#define UDC_DMA_OUT_STS_FRAMENUM_MASK		0x07ff0000
+#define UDC_DMA_OUT_STS_FRAMENUM_OFS		0
+#define UDC_DMA_OUT_STS_L			27
+#define UDC_DMA_OUT_STS_RX_MASK			0x30000000
+#define UDC_DMA_OUT_STS_RX_OFS			28
+#define UDC_DMA_OUT_STS_BS_MASK			0xc0000000
+#define UDC_DMA_OUT_STS_BS_OFS			30
+#define UDC_DMA_OUT_STS_BS_HOST_READY		0
+#define UDC_DMA_OUT_STS_BS_DMA_BUSY		1
+#define UDC_DMA_OUT_STS_BS_DMA_DONE		2
+#define UDC_DMA_OUT_STS_BS_HOST_BUSY		3
+/* max ep0in packet */
+#define UDC_EP0IN_MAXPACKET			1000
+/* max dma packet */
+#define UDC_DMA_MAXPACKET			65536
+
+/* un-usable DMA address */
+#define DMA_DONT_USE				(~(dma_addr_t) 0 )
+
+/* other Endpoint register addresses and values-----------------------------*/
+#define UDC_EP_SUBPTR_ADDR			0x10
+#define UDC_EP_DESPTR_ADDR			0x14
+#define UDC_EP_WRITE_CONFIRM_ADDR		0x1c
+
+/* EP numbering as laid out in AHB space */
+#define UDC_EP_NUM				32
+#define UDC_EPIN_NUM				16
+#define UDC_EPIN_NUM_USED			5
+#define UDC_EPOUT_NUM				16
+/* EP number of EP's really used = EP0 + 8 data EP's */
+#define UDC_USED_EP_NUM				9
+/* UDC CSR regs are aligned, but AHB regs are not - offset for OUT EPs */
+#define UDC_CSR_EP_OUT_IX_OFS			12
+
+#define UDC_EP0OUT_IX				16
+#define UDC_EP0IN_IX				0
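+/*
+ * The ep[] array in struct udc uses the same indexing: IN endpoints occupy
+ * indices 0..15 and OUT endpoints indices 16..31, so EP0-IN is ep[0] and
+ * EP0-OUT is ep[16].
+ */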
+
+/* Rx fifo address and size = 1k -------------------------------------------*/
+#define UDC_RXFIFO_ADDR				0x800
+#define UDC_RXFIFO_SIZE				0x400
+
+/* Tx fifo address and size = 1.5k -----------------------------------------*/
+#define UDC_TXFIFO_ADDR				0xc00
+#define UDC_TXFIFO_SIZE				0x600
+
+/* default data endpoints --------------------------------------------------*/
+#define UDC_EPIN_STATUS_IX			1
+#define UDC_EPIN_IX				2
+#define UDC_EPOUT_IX				18
+
+/* general constants -------------------------------------------------------*/
+#define UDC_DWORD_BYTES				4
+#define UDC_BITS_PER_BYTE_SHIFT			3
+#define UDC_BYTE_MASK				0xff
+#define UDC_BITS_PER_BYTE			8
+
+/*---------------------------------------------------------------------------*/
+/* UDC CSR's */
+struct udc_csrs {
+
+	/* sca - setup command address */
+	u32 sca;
+
+	/* ep ne's */
+	u32 ne[UDC_USED_EP_NUM];
+} __attribute__ ((packed));
+
+/* AHB subsystem CSR registers */
+struct udc_regs {
+
+	/* device configuration */
+	u32 cfg;
+
+	/* device control */
+	u32 ctl;
+
+	/* device status */
+	u32 sts;
+
+	/* device interrupt */
+	u32 irqsts;
+
+	/* device interrupt mask */
+	u32 irqmsk;
+
+	/* endpoint interrupt */
+	u32 ep_irqsts;
+
+	/* endpoint interrupt mask */
+	u32 ep_irqmsk;
+} __attribute__ ((packed));
+
+/* endpoint specific registers */
+struct udc_ep_regs {
+
+	/* endpoint control */
+	u32 ctl;
+
+	/* endpoint status */
+	u32 sts;
+
+	/* endpoint buffer size in/ receive packet frame number out */
+	u32 bufin_framenum;
+
+	/* endpoint buffer size out/max packet size */
+	u32 bufout_maxpkt;
+
+	/* endpoint setup buffer pointer */
+	u32 subptr;
+
+	/* endpoint data descriptor pointer */
+	u32 desptr;
+
+	/* reserved */
+	u32 reserved;
+
+	/* write/read confirmation */
+	u32 confirm;
+
+} __attribute__ ((packed));
+
+/* control data DMA desc */
+struct udc_stp_dma {
+	/* status quadlet */
+	u32	status;
+	/* reserved */
+	u32	_reserved;
+	/* first setup word */
+	u32	data12;
+	/* second setup word */
+	u32	data34;
+} __attribute__ ((aligned (16)));
+
+/* normal data DMA desc */
+struct udc_data_dma {
+	/* status quadlet */
+	u32	status;
+	/* reserved */
+	u32	_reserved;
+	/* buffer pointer */
+	u32	bufptr;
+	/* next descriptor pointer */
+	u32	next;
+} __attribute__ ((aligned (16)));
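+/*
+ * Data descriptors are chained through 'next'; the L bit in 'status'
+ * (UDC_DMA_IN_STS_L / UDC_DMA_OUT_STS_L) marks the last descriptor of a
+ * chain, which is why the IN handler clears it on the first descriptor when
+ * packet-per-buffer chaining is used.
+ */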
+
+/* request packet */
+struct udc_request {
+	/* embedded gadget ep */
+	struct usb_request		req;
+
+	/* flags */
+	unsigned			dma_going : 1,
+					dma_mapping : 1,
+					dma_done : 1;
+	/* phys. address */
+	dma_addr_t			td_phys;
+	/* first dma desc. of chain */
+	struct udc_data_dma		*td_data;
+	/* last dma desc. of chain */
+	struct udc_data_dma		*td_data_last;
+	struct list_head		queue;
+
+	/* chain length */
+	unsigned			chain_len;
+
+};
+
+/* UDC specific endpoint parameters */
+struct udc_ep {
+	struct usb_ep			ep;
+	struct udc_ep_regs __iomem	*regs;
+	u32 __iomem			*txfifo;
+	u32 __iomem			*dma;
+	dma_addr_t			td_phys;
+	dma_addr_t			td_stp_dma;
+	struct udc_stp_dma		*td_stp;
+	struct udc_data_dma		*td;
+	/* temp request */
+	struct udc_request		*req;
+	unsigned			req_used;
+	unsigned			req_completed;
+	/* dummy DMA desc for BNA dummy */
+	struct udc_request		*bna_dummy_req;
+	unsigned			bna_occurred;
+
+	/* NAK state */
+	unsigned			naking;
+
+	struct udc			*dev;
+
+	/* queue for requests */
+	struct list_head		queue;
+	const struct usb_endpoint_descriptor	*desc;
+	unsigned			halted;
+	unsigned			cancel_transfer;
+	unsigned			num : 5,
+					fifo_depth : 14,
+					in : 1;
+};
+
+/* device struct */
+struct udc {
+	struct usb_gadget		gadget;
+	spinlock_t			lock;	/* protects all state */
+	/* all endpoints */
+	struct udc_ep			ep[UDC_EP_NUM];
+	struct usb_gadget_driver	*driver;
+	/* operational flags */
+	unsigned			active : 1,
+					stall_ep0in : 1,
+					waiting_zlp_ack_ep0in : 1,
+					set_cfg_not_acked : 1,
+					irq_registered : 1,
+					data_ep_enabled : 1,
+					data_ep_queued : 1,
+					mem_region : 1,
+					sys_suspended : 1,
+					connected;
+
+	u16				chiprev;
+
+	/* registers */
+	struct pci_dev			*pdev;
+	struct udc_csrs __iomem		*csr;
+	struct udc_regs __iomem		*regs;
+	struct udc_ep_regs __iomem	*ep_regs;
+	u32 __iomem			*rxfifo;
+	u32 __iomem			*txfifo;
+
+	/* DMA desc pools */
+	struct pci_pool			*data_requests;
+	struct pci_pool			*stp_requests;
+
+	/* device data */
+	unsigned long			phys_addr;
+	void __iomem			*virt_addr;
+	unsigned			irq;
+
+	/* states */
+	u16				cur_config;
+	u16				cur_intf;
+	u16				cur_alt;
+};
+
+/* setup request data */
+union udc_setup_data {
+	u32			data[2];
+	struct usb_ctrlrequest	request;
+};
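+/*
+ * data[] aliases the 8-byte usb_ctrlrequest: the control OUT ISR fills it
+ * either from the setup DMA descriptor (td_stp->data12/data34) or from two
+ * RXFIFO dword reads, then hands 'request' to the gadget driver's setup().
+ */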
+
+/*
+ *---------------------------------------------------------------------------
+ * SET and GET bitfields in u32 values
+ * via constants for mask/offset:
+ * <bit_field_stub_name> is the text between
+ * UDC_ and _MASK|_OFS of appropriate
+ * constant
+ *
+ * set bitfield value in u32 u32Val
+ */
+#define AMD_ADDBITS(u32Val, bitfield_val, bitfield_stub_name)		\
+	(((u32Val) & (((u32) ~((u32) bitfield_stub_name##_MASK))))	\
+	| (((bitfield_val) << ((u32) bitfield_stub_name##_OFS))		\
+		& ((u32) bitfield_stub_name##_MASK)))
+
+/*
+ * set bitfield value in zero-initialized u32 u32Val
+ * => bitfield bits in u32Val are all zero
+ */
+#define AMD_INIT_SETBITS(u32Val, bitfield_val, bitfield_stub_name)	\
+	((u32Val)							\
+	| (((bitfield_val) << ((u32) bitfield_stub_name##_OFS))		\
+		& ((u32) bitfield_stub_name##_MASK)))
+
+/* get bitfield value from u32 u32Val */
+#define AMD_GETBITS(u32Val, bitfield_stub_name)				\
+	((u32Val & ((u32) bitfield_stub_name##_MASK))			\
+		>> ((u32) bitfield_stub_name##_OFS))
+
+/* SET and GET bits in u32 values ------------------------------------------*/
+#define AMD_BIT(bit_stub_name) (1 << bit_stub_name)
+#define AMD_UNMASK_BIT(bit_stub_name) (~AMD_BIT(bit_stub_name))
+#define AMD_CLEAR_BIT(bit_stub_name) (~AMD_BIT(bit_stub_name))
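+/*
+ * Example (as used in udc_dev_isr): read the current configuration from the
+ * device status register and program it into an endpoint NE register:
+ *
+ *	cfg = AMD_GETBITS(readl(&dev->regs->sts), UDC_DEVSTS_CFG);
+ *	tmp = readl(&dev->csr->ne[udc_csr_epix]);
+ *	tmp = AMD_ADDBITS(tmp, cfg, UDC_CSR_NE_CFG);
+ *	writel(tmp, &dev->csr->ne[udc_csr_epix]);
+ */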
+
+/* debug macros ------------------------------------------------------------*/
+
+#define DBG(udc , args...)	dev_dbg(&(udc)->pdev->dev, args)
+
+#ifdef UDC_VERBOSE
+#define VDBG			DBG
+#else
+#define VDBG(udc , args...)	do {} while (0)
+#endif
+
+#endif /* #ifdef AMD5536UDC_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/android.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/android.c
new file mode 100755
index 0000000..9094fd3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/android.c
@@ -0,0 +1,3106 @@
+/*
+ * Gadget Driver for Android
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *         Benoit Goby <benoit@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/android_notify.h>
+#include <mach/highspeed_debug.h>
+
+#include "gadget_chips.h"
+#include <mach/iomap.h>
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "composite.c"
+
+//#include "f_fs.c"
+//#include "f_audio_source.c"
+#include "f_mass_storage.c"
+#include "u_serial.c"
+#include "f_acm.c"
+#include "f_serial.c"
+#include "f_adb.c"
+//#include "f_mtp.c"
+//#include "f_accessory.c"
+#define USB_ETH_RNDIS y
+#include "f_ecm.c"
+#include "f_rndis.c"
+#include "rndis.c"
+#include "f_mbim.c"
+#include "u_ether.c"
+#include "u_diag.c"
+#ifdef _USE_MBIM//CONFIG_USB_F_DIAG_ACM
+#include "f_diag_acm.c"
+#else
+#include "f_diag.c"
+#endif
+
+MODULE_AUTHOR("Mike Lockwood");
+MODULE_DESCRIPTION("Android Composite USB Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+#ifndef CONFIG_SYSTEM_RECOVERY
+int zDrvNand_WriteBootflag( int flag );
+#endif
+
+#define pr_err		USB_DEBUG
+
+
+static const char longname[] = "Gadget Android";
+
+/* Default vendor and product IDs, overridden by userspace */
+#define VENDOR_ID		0x19D2
+#define PRODUCT_ID		0x0193
+
+/* USB net card name */
+#define USB_VNIC_NAME	"usblan"
+
+/* USB NET CARD MAX NUM */
+#define MAX_ECM_INSTANCES 3
+
+
+#define CFG_LUN_NUM_TWO	1
+
+#define MBIM_PACKET_MAX_NUM  10
+#define RNDIS_PACKET_MAX_NUM  10
+
+
+#define OS_string_descriptor_id	0xEE
+#define bMS_Code_Original		0x04
+#define bMS_Code_Change		0x08
+struct android_usb_function {
+	char *name;
+	void *config;
+
+	struct device *dev;
+	struct usb_config_descriptor *cof;
+	char *dev_name;
+	struct device_attribute **attributes;
+
+	/* for android_dev.enabled_functions */
+	struct list_head enabled_list;
+
+	/* Optional: initialization during gadget bind */
+	int (*init)(struct android_usb_function *, struct usb_composite_dev *);
+	/* Optional: cleanup during gadget unbind */
+	void (*cleanup)(struct android_usb_function *);
+	/* Optional: called when the function is added to the list of
+	 *		enabled functions */
+	void (*enable)(struct android_usb_function *);
+	/* Optional: called when it is removed */
+	void (*disable)(struct android_usb_function *);
+
+	int (*bind_config)(struct android_usb_function *,
+			   struct usb_configuration *);
+
+	/* Optional: called when the configuration is removed */
+	void (*unbind_config)(struct android_usb_function *,
+			      struct usb_configuration *);
+	/* Optional: handle ctrl requests before the device is configured */
+	int (*ctrlrequest)(struct android_usb_function *,
+					struct usb_composite_dev *,
+					const struct usb_ctrlrequest *);
+};
+
+struct android_dev {
+	struct android_usb_function **functions;
+	struct list_head enabled_functions;
+	struct usb_composite_dev *cdev;
+	struct device *dev;
+
+	bool cdrom_only;
+	bool enabled;
+	int disable_depth;
+	//int bRequest;
+	//int wValue;
+	struct mutex mutex;
+	bool connected;
+	bool sw_connected;
+	struct work_struct work;
+	struct work_struct usbmode;
+	char ffs_aliases[256];
+
+	struct usb_ctrlrequest vendor_req;
+};
+
+static struct class *android_class;
+static struct android_dev *_android_dev;
+static int  multi_packet_num = 10;
+static int  ether_skb_num = 32;
+
+static int android_bind_config(struct usb_configuration *c);
+static void android_unbind_config(struct usb_configuration *c);
+void usb_mods_init(void);
+void usb_mods_exit(void);
+void usb_mods_activate(void);
+void usb_set_ms_auto_reject(int flag);
+extern void dwc_otg_clk_enable(int isOn);
+
+typedef enum usb_enum_mode_type{
+	USB_ENUM_MODE_DEBUG = 0,
+	USB_ENUM_MODE_USER,
+	USB_ENUM_MODE_FACTORY,
+	USB_ENUM_MODE_AMT,
+	USB_ENUM_MODE_EYE_DIAGRAM,		
+	USB_ENUM_MODE_MAX,
+}enum_mode_type;
+
+enum_mode_type usb_cur_enum_mode = USB_ENUM_MODE_DEBUG;
+
+
+
+/* string IDs are assigned dynamically */
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+#define STRING_SERIAL_IDX		2
+#define STRING_CONFIGURATION_IDX		3
+
+static char manufacturer_string[256];
+static char product_string[256];
+static char serial_string[256];
+static char configuration_string[256];
+
+/* String Table */
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer_string,
+	[STRING_PRODUCT_IDX].s = product_string,
+	[STRING_SERIAL_IDX].s = serial_string,
+	[STRING_CONFIGURATION_IDX].s = configuration_string,
+	{  }			/* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+	.bLength              = sizeof(device_desc),
+	.bDescriptorType      = USB_DT_DEVICE,
+	.bcdUSB               = __constant_cpu_to_le16(0x0200),
+	.bDeviceClass         = USB_CLASS_PER_INTERFACE,
+	.idVendor             = __constant_cpu_to_le16(VENDOR_ID),
+	.idProduct            = __constant_cpu_to_le16(PRODUCT_ID),
+	.bcdDevice            = __constant_cpu_to_le16(0xffff),
+	.bNumConfigurations   = 1,
+};
+
+static struct usb_configuration android_config_driver = {
+	.label		= "android",
+	.unbind		= android_unbind_config,
+	.bConfigurationValue = 1,
+	.iConfiguration            =	1,
+#if 0	
+	.bmAttributes	= USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER | USB_CONFIG_ATT_WAKEUP,
+#else
+	.bmAttributes	= USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+#endif	
+	.bMaxPower	= 0xFA, /* 500 mA (units of 2 mA) */
+};
+
+int get_usb_enum_mode(void)
+{
+	return usb_cur_enum_mode;
+}
+EXPORT_SYMBOL_GPL(get_usb_enum_mode);
+
+static void usb_mode_work(struct work_struct *data)
+{
+	struct android_dev *dev = container_of(data, struct android_dev, usbmode);
+	 
+	switch(dev->vendor_req.bRequest)
+	{
+		case USB_SWITCH_to_DEBUG:
+			if(usb_cur_enum_mode == USB_ENUM_MODE_DEBUG){
+				printk("usb_mode_work already debug mode\n");
+				break;
+			}
+			if(dev->vendor_req.wValue == 0x0002)
+			{
+#ifndef CONFIG_SYSTEM_RECOVERY
+				zDrvNand_WriteBootflag(1);
+#endif
+				usb_notify_up(USB_SWITCH_DEBUG,NULL);
+			}
+			break;
+			
+		case USB_SWITCH_to_DEBUG_AT:
+			if(dev->vendor_req.wValue == 0x0101)
+			{
+#ifndef CONFIG_SYSTEM_RECOVERY
+				zDrvNand_WriteBootflag(0);
+#endif
+				usb_notify_up(USB_SWITCH_DEBUG,NULL);
+			}
+			break;
+
+		case USB_SWITCH_to_USER:
+			if(usb_cur_enum_mode == USB_ENUM_MODE_USER){
+				printk("usb_mode_work already user mode\n");
+				break;
+			}
+#ifndef CONFIG_SYSTEM_RECOVERY
+			zDrvNand_WriteBootflag(1);
+#endif
+			usb_notify_up(USB_SWITCH_USER, NULL);
+			break;
+
+		case USB_SWITCH_to_FACTORY:
+			usb_notify_up(USB_SWITCH_FACTORY, NULL);
+			break;
+
+		case USB_SWITCH_to_AMT:
+			usb_notify_up(USB_SWITCH_AMT, NULL);
+			break;
+		case USB_SWITCH_to_EYE_DIAGRAM:
+			usb_notify_up(USB_SWITCH_EYE_DIAGRAM, NULL);
+			break;
+	}
+}
+
+int get_vnic_multi_packet_num(void)
+{
+	return multi_packet_num;
+}
+
+unsigned int gether_ether_skb_num(void)
+{
+	return ether_skb_num;
+}
+
+static void android_work(struct work_struct *data)
+{
+	struct android_dev *dev = container_of(data, struct android_dev, work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
+	char *connected[2]    = { "USB_STATE=CONNECTED", NULL };
+	char *configured[2]   = { "USB_STATE=CONFIGURED", NULL };
+	char **uevent_envp = NULL;
+	unsigned long flags;
+	USBSTACK_DBG("%s, %u", __func__, __LINE__);
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		uevent_envp = configured;
+	else if (dev->connected != dev->sw_connected)
+		uevent_envp = dev->connected ? connected : disconnected;
+	dev->sw_connected = dev->connected;
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	if (uevent_envp) {
+		kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, uevent_envp);
+		//pr_info("%s: sent uevent %s\n", __func__, uevent_envp[0]);
+	} else {
+		//pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
+		//	 dev->connected, dev->sw_connected, cdev->config);
+	}
+}
+
+static void android_enable(struct android_dev *dev)
+{
+	int ret;
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	//USB_DEBUG("disable_depth:%d", dev->disable_depth);
+	USBSTACK_DBG("%s, disable_depth:%d", __func__, dev->disable_depth);
+	if (WARN_ON(!dev->disable_depth))
+		return;
+	printk("----android_enable, bmattr:%x\n", android_config_driver.bmAttributes);
+	if (--dev->disable_depth == 0) {
+		ret =usb_add_config(cdev, &android_config_driver,
+					android_bind_config);
+		if(ret)
+			USBSTACK_DBG("usb_add_config, ret:%d", ret);
+		usb_gadget_connect(cdev->gadget);
+	}
+}
+
+static void android_disable(struct android_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	USBSTACK_DBG("%s, disable_depth:%d", __func__, dev->disable_depth);
+	if (dev->disable_depth++ == 0) {
+		usb_gadget_disconnect(cdev->gadget);
+		/* Cancel pending control requests */
+		usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
+		usb_remove_config(cdev, &android_config_driver);
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+/* Supported functions initialization */
+
+#if 0
+struct functionfs_config {
+	bool opened;
+	bool enabled;
+	struct ffs_data *data;
+};
+
+static int ffs_function_init(struct android_usb_function *f,
+			     struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct functionfs_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+
+	return functionfs_init();
+}
+
+static void ffs_function_cleanup(struct android_usb_function *f)
+{
+	functionfs_cleanup();
+	kfree(f->config);
+}
+
+static void ffs_function_enable(struct android_usb_function *f)
+{
+	struct android_dev *dev = _android_dev;
+	struct functionfs_config *config = f->config;
+
+	config->enabled = true;
+
+	/* Disable the gadget until the function is ready */
+	if (!config->opened)
+		android_disable(dev);
+}
+
+static void ffs_function_disable(struct android_usb_function *f)
+{
+	struct android_dev *dev = _android_dev;
+	struct functionfs_config *config = f->config;
+
+	config->enabled = false;
+
+	/* Balance the disable that was called in closed_callback */
+	if (!config->opened)
+		android_enable(dev);
+}
+
+static int ffs_function_bind_config(struct android_usb_function *f,
+				    struct usb_configuration *c)
+{
+	struct functionfs_config *config = f->config;
+	return functionfs_bind_config(c->cdev, c, config->data);
+}
+
+static ssize_t
+ffs_aliases_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+	struct android_dev *dev = _android_dev;
+	int ret;
+
+	mutex_lock(&dev->mutex);
+	ret = sprintf(buf, "%s\n", dev->ffs_aliases);
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+static ssize_t
+ffs_aliases_store(struct device *pdev, struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	struct android_dev *dev = _android_dev;
+	char buff[256];
+
+	mutex_lock(&dev->mutex);
+
+	if (dev->enabled) {
+		mutex_unlock(&dev->mutex);
+		return -EBUSY;
+	}
+
+	strlcpy(buff, buf, sizeof(buff));
+	strlcpy(dev->ffs_aliases, strim(buff), sizeof(dev->ffs_aliases));
+
+	mutex_unlock(&dev->mutex);
+
+	return size;
+}
+
+static DEVICE_ATTR(aliases, S_IRUGO | S_IWUSR, ffs_aliases_show,
+					       ffs_aliases_store);
+static struct device_attribute *ffs_function_attributes[] = {
+	&dev_attr_aliases,
+	NULL
+};
+
+static struct android_usb_function ffs_function = {
+	.name		= "ffs",
+	.init		= ffs_function_init,
+	.enable		= ffs_function_enable,
+	.disable	= ffs_function_disable,
+	.cleanup	= ffs_function_cleanup,
+	.bind_config	= ffs_function_bind_config,
+	.attributes	= ffs_function_attributes,
+};
+
+static int functionfs_ready_callback(struct ffs_data *ffs)
+{
+	struct android_dev *dev = _android_dev;
+	struct functionfs_config *config = ffs_function.config;
+	int ret = 0;
+
+	mutex_lock(&dev->mutex);
+
+	ret = functionfs_bind(ffs, dev->cdev);
+	if (ret)
+		goto err;
+
+	config->data = ffs;
+	config->opened = true;
+
+	if (config->enabled)
+		android_enable(dev);
+
+err:
+	mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+static void functionfs_closed_callback(struct ffs_data *ffs)
+{
+	struct android_dev *dev = _android_dev;
+	struct functionfs_config *config = ffs_function.config;
+
+	mutex_lock(&dev->mutex);
+
+	if (config->enabled)
+		android_disable(dev);
+
+	config->opened = false;
+	config->data = NULL;
+
+	functionfs_unbind(ffs);
+
+	mutex_unlock(&dev->mutex);
+}
+
+static int functionfs_check_dev_callback(const char *dev_name)
+{
+	return 0;
+}
+#endif
+
+#define ECM_TIMEOUT	6000
+
+struct ecm_function_config {
+	u8	ethaddr[MAX_ECM_INSTANCES][ETH_ALEN];
+	int	instances;
+	int	receive_setIfac;
+	int	receive_setICfg;
+	struct delayed_work work;	
+};
+
+extern void dwc_otg_wakelock(int lock_flag,int phase);
+static void ecm_function_work(struct work_struct *data)
+{
+	USBSTACK_DBG("ecm work");
+	dwc_otg_wakelock(1,0);
+	usb_notify_up(USB_DEVICE_PLUGOUT, NULL);
+	usb_notify_up(USB_DEVICE_PLUGIN, NULL);
+	dwc_otg_wakelock(0,0);
+}
+static int ecm_function_ctrlrequest(struct android_usb_function * f,
+					struct usb_composite_dev *dev,
+					const struct usb_ctrlrequest * ctrl)
+{
+	int	value = -1;
+	struct ecm_function_config *ecm = f->config;
+#if 1	
+	switch (ctrl->bRequestType & USB_TYPE_MASK){
+	case USB_TYPE_STANDARD:
+		if(ctrl->bRequest == USB_REQ_SET_CONFIGURATION){
+			if((!ecm->receive_setIfac)&&(!ecm->receive_setICfg)){
+				printk("ecm-receive_setICfg\n");
+				//schedule_delayed_work(&ecm->work, msecs_to_jiffies(ECM_TIMEOUT));
+			}			
+			if((ecm->receive_setIfac)&&(!ecm->receive_setICfg))
+				gether_ecm_uevent(ecm->instances, 1);
+			ecm->receive_setICfg = 1;
+		}else if(ctrl->bRequest == USB_REQ_SET_INTERFACE){
+			u8	intf = (le16_to_cpu(ctrl->wIndex)) & 0xFF;
+			struct usb_function	*f_intf = dev->config->interface[intf];
+			struct f_ecm		*f_ecm = func_to_ecm(f_intf);
+				printk("ecm-receive_setinterface\n");
+			if (intf != f_ecm->data_id)
+				return value;
+			if(ecm->receive_setICfg){
+				//USBSTACK_DBG("ecm-delayed-work cancel");
+				//cancel_delayed_work_sync(&ecm->work);
+				gether_ecm_uevent(ecm->instances, 1);
+			}
+			ecm->receive_setIfac = 1;
+		}
+		break;
+	case USB_TYPE_VENDOR:
+		if((ctrl->bRequest == bMS_Code_Original )||
+			(ctrl->bRequest == bMS_Code_Change )){
+				printk("ecm-sys-id-err");
+				schedule_delayed_work(&ecm->work, msecs_to_jiffies(50));
+		}
+		break;		
+	}
+#endif
+	return value;
+}
+
+static int
+ecm_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	struct ecm_function_config *ecm;
+	
+	USBSTACK_DBG("%s", __func__);
+	
+	ecm = kzalloc(sizeof(struct ecm_function_config), GFP_KERNEL);
+
+	if (!ecm)
+		return -ENOMEM;
+
+	ecm->instances = 1;
+	INIT_DELAYED_WORK(&ecm->work, ecm_function_work);
+	f->config = ecm;
+	
+	return 0;
+}
+
+static void ecm_function_cleanup(struct android_usb_function *f)
+{
+	USBSTACK_DBG("%s", __func__);
+	kfree(f->config);
+	f->config = NULL;
+}
+
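+/*
+ * Set up one virtual network interface (USB_VNIC_NAME, numbered per
+ * instance) for each configured ECM instance and bind it into the
+ * configuration.
+ */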
+static int
+ecm_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	int ret = 0, i;
+	struct ecm_function_config *ecm = f->config;
+
+	if (!ecm) {
+		pr_err("%s: ecm_pdata\n", __func__);
+		return -1;
+	}
+	USBSTACK_DBG("%s, instances:%d", __func__, ecm->instances);
+	ecm->receive_setIfac = 0;
+	ecm->receive_setICfg = 0;
+	
+	for (i = 0; ((i < ecm->instances) && (i< MAX_ECM_INSTANCES)); i++) {
+		ret = gether_setup_name_num(c->cdev->gadget, 
+							ecm->ethaddr[i], USB_VNIC_NAME, i);
+		if (ret) {
+			pr_err("%s: gether_setup failed\n", __func__);
+			return ret;
+		}
+
+		ret = ecm_bind_config_num(c, ecm->ethaddr[i], i);
+		if (ret) {
+			pr_err("Could not bind ecm%u config\n", i);
+			break;
+		}
+	}
+
+	return ret;	
+}
+
+static void ecm_function_unbind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	int i;
+	struct ecm_function_config *ecm = f->config;
+	
+	USBSTACK_DBG("%s", __func__);
+	cancel_delayed_work_sync(&ecm->work);
+	
+	for(i=0; i<ecm->instances; i++){
+		gether_cleanup_num(i);
+	}
+}
+
+static ssize_t ecm_ethaddr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+    struct ecm_function_config *ecm = f->config;
+    if(NULL == ecm)
+    {
+        return -EINVAL;
+    }
+    USBSTACK_DBG("%s", __func__);
+	return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		ecm->ethaddr[0][0], ecm->ethaddr[0][1], ecm->ethaddr[0][2],
+		ecm->ethaddr[0][3], ecm->ethaddr[0][4], ecm->ethaddr[0][5]);
+}
+
+static ssize_t ecm_ethaddr_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct ecm_function_config *ecm;
+	unsigned int mac[ETH_ALEN];
+	int i;
+
+	if (NULL == f || NULL == f->config)
+		return -EINVAL;
+	ecm = f->config;
+
+	USBSTACK_DBG("%s", __func__);
+	/* parse into temporaries: scanning %x directly through a u8 pointer
+	 * would overwrite the neighbouring bytes */
+	if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		    &mac[0], &mac[1], &mac[2],
+		    &mac[3], &mac[4], &mac[5]) == 6) {
+		for (i = 0; i < ETH_ALEN; i++)
+			ecm->ethaddr[0][i] = (u8)mac[i];
+		return size;
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(ethaddr_ecm, S_IRUGO | S_IWUSR, ecm_ethaddr_show,
+					       ecm_ethaddr_store);
+
+static ssize_t ecm_instances_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+	struct ecm_function_config *config = f->config;
+	    if(NULL == config)
+    {
+        return -EINVAL;
+    }
+    USBSTACK_DBG("%s", __func__);
+	return sprintf(buf, "%d\n", config->instances);
+}
+
+static ssize_t ecm_instances_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+    struct ecm_function_config *config = f->config;
+	if(NULL == config)
+    {
+        return -EINVAL;
+    }
+	int value;
+	USBSTACK_DBG("%s", __func__);
+	if (sscanf(buf, "%d", &value) != 1)
+		return -EINVAL;
+	if ((value > MAX_ECM_INSTANCES) || (value < 1)) {
+		USB_DEBUG("WARNING: invalid ECM instance count: %d", value);
+		value = 1;
+	}
+	config->instances = value;
+	return size;
+}
+
+static DEVICE_ATTR(ecm_instances, S_IRUGO | S_IWUSR, ecm_instances_show,
+						 ecm_instances_store);
+
+static struct device_attribute *ecm_function_attributes[] = {
+	&dev_attr_ethaddr_ecm,
+	&dev_attr_ecm_instances,
+	NULL
+};
+
+
+static struct android_usb_function  ecm_function = {
+	.name		= "ecm",
+	.init		= ecm_function_init,
+	.cleanup	= ecm_function_cleanup,
+	.bind_config	= ecm_function_bind_config,
+	.unbind_config	= ecm_function_unbind_config,
+	.attributes	= ecm_function_attributes,
+	.ctrlrequest	= ecm_function_ctrlrequest,
+};
+
+
+#define MAX_DIAG_INSTANCES 1
+struct diag_function_config {
+	int instances;
+};
+
+static int
+diag_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	USB_DEBUG("DIAG INIT");
+	USBSTACK_DBG("%s", __func__);
+	f->config = kzalloc(sizeof(struct diag_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+	((struct diag_function_config *)(f->config))->instances = 1;
+	return diag_setup(cdev->gadget, MAX_DIAG_INSTANCES);
+}
+
+
+static void diag_function_cleanup(struct android_usb_function *f)
+{
+	USBSTACK_DBG("%s", __func__);
+	diag_cleanup();
+	kfree(f->config);
+	f->config = NULL;
+}
+
+static int
+diag_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	int i;
+	int ret = 0;
+	struct diag_function_config *config = f->config;
+	USB_DEBUG("diag config, instances:%d",config->instances);
+	USBSTACK_DBG("%s", __func__);
+	for (i = 0; i < config->instances; i++) {
+		//ret = acm_bind_config(c, i);
+		USB_DEBUG("instance :%d", i);
+
+#ifdef _USE_MBIM	/* CONFIG_USB_F_DIAG_ACM */
+		ret = diag_acm_bind_config(c, i);
+#else
+		ret = diag_bind_config(c, i);
+#endif
+		USB_DEBUG("ret:%d",ret);
+		if (ret) {
+			pr_err("Could not bind diag%u config\n", i);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static struct android_usb_function diag_function = {
+	.name		= "diag",
+	//.enable		= adb_android_function_enable,
+	//.disable	= adb_android_function_disable,
+	.init		= diag_function_init,
+	.cleanup	= diag_function_cleanup,
+	.bind_config	= diag_function_bind_config,
+};
+
+struct adb_data {
+	bool opened;
+	bool enabled;
+};
+
+static int
+adb_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	USBSTACK_DBG("%s", __func__);
+	f->config = kzalloc(sizeof(struct adb_data), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+
+	return adb_setup();
+}
+
+static void adb_function_cleanup(struct android_usb_function *f)
+{
+	USBSTACK_DBG("%s", __func__);
+	adb_cleanup();
+	kfree(f->config);
+}
+
+static int
+adb_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	USBSTACK_DBG("%s", __func__);
+	return adb_bind_config(c);
+}
+
+static void adb_android_function_enable(struct android_usb_function *f)
+{
+	struct android_dev *dev = _android_dev;
+	struct adb_data *data = f->config;
+	USBSTACK_DBG("%s, open:%d", __func__, data->opened);
+	data->enabled = true;
+
+	/* Disable the gadget until adbd is ready */
+	if (!data->opened)
+		android_disable(dev);
+}
+
+static void adb_android_function_disable(struct android_usb_function *f)
+{
+	struct android_dev *dev = _android_dev;
+	struct adb_data *data = f->config;
+	USBSTACK_DBG("%s data opened:%d", __func__, data->opened);
+	data->enabled = false;
+
+	/* Balance the disable that was called in closed_callback */
+	if (!data->opened)
+		android_enable(dev);
+}
+
+static struct android_usb_function adb_function = {
+	.name		= "adb",
+	//.enable		= adb_android_function_enable,
+	//.disable	= adb_android_function_disable,
+	.init		= adb_function_init,
+	.cleanup	= adb_function_cleanup,
+	.bind_config	= adb_function_bind_config,
+};
+
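+/*
+ * Callbacks from the adb function driver: connect the gadget once adbd has
+ * opened the device (if adb is enabled), and disconnect it again when adbd
+ * closes the device.
+ */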
+static void adb_ready_callback(void)
+{
+	struct android_dev *dev = _android_dev;
+	struct adb_data *data = adb_function.config;
+	USBSTACK_DBG("%s, data enable:%d", __func__, data->enabled);
+	mutex_lock(&dev->mutex);
+
+	data->opened = true;
+
+	if (data->enabled){
+		usb_gadget_set_selfpowered(dev->cdev->gadget);
+		android_enable(dev);
+	}
+
+	mutex_unlock(&dev->mutex);
+	USBSTACK_DBG("%s,%u", __func__, __LINE__);
+}
+
+static void adb_closed_callback(void)
+{
+	struct android_dev *dev = _android_dev;
+	struct adb_data *data = adb_function.config;
+	USBSTACK_DBG("%s data enabled:%d", __func__, data->enabled);
+	mutex_lock(&dev->mutex);
+
+	data->opened = false;
+
+	if (data->enabled){
+		dwc_otg_clk_enable(1);
+		android_disable(dev);
+		usb_gadget_clear_selfpowered(dev->cdev->gadget);
+	}
+
+	mutex_unlock(&dev->mutex);
+	USBSTACK_DBG("%s %u", __func__, __LINE__);
+}
+
+
+#define MAX_ACM_INSTANCES 3
+#define MAX_SERIAL_INSTANCES 4
+struct acm_function_config {
+	int instances;
+};
+
+static int
+acm_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	USB_DEBUG("ACM INIT");
+	USBSTACK_DBG("%s", __func__);
+	f->config = kzalloc(sizeof(struct acm_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+	((struct acm_function_config *)(f->config))->instances =1;
+	return gserial_setup(cdev->gadget, MAX_SERIAL_INSTANCES);
+}
+
+static void acm_function_cleanup(struct android_usb_function *f)
+{
+	USBSTACK_DBG("%s", __func__);
+	gserial_cleanup();
+	kfree(f->config);
+	f->config = NULL;
+}
+
+static int
+acm_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	int i;
+	int ret = 0;
+	struct acm_function_config *config = f->config;
+	USB_DEBUG("acm config, instances:%d",config->instances);
+	USBSTACK_DBG("%s", __func__);
+	for (i = 0; i < config->instances; i++) {
+		USB_DEBUG("instance :%d", i);
+		ret = acm_bind_config(c, i);
+		//ret = gser_bind_config(c, i);
+		USB_DEBUG("ret:%d",ret);
+		if (ret) {
+			pr_err("Could not bind acm%u config\n", i);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int
+serial_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	int i;
+	int ret = 0;
+	struct acm_function_config *config = f->config;
+	USB_DEBUG("serial config, instances:%d", config->instances);
+	USBSTACK_DBG("%s", __func__);
+	for (i = 0; i < config->instances; i++) {
+		USB_DEBUG("instance :%d", i);
+		ret = gser_bind_config(c, i);
+		USB_DEBUG("ret:%d",ret);
+		if (ret) {
+			pr_err("Could not bind serial%u config\n", i);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static ssize_t acm_instances_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+	struct acm_function_config *config = f->config;
+    if(NULL == config)
+    {
+        return -EINVAL;
+    }
+
+	USBSTACK_DBG("%s", __func__);
+	return sprintf(buf, "%d\n", config->instances);
+}
+
+static ssize_t acm_instances_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+    struct acm_function_config *config = f->config;
+	if(NULL == config)
+    {
+        return -EINVAL;
+    }
+	int value;
+	USBSTACK_DBG("%s", __func__);
+	if (sscanf(buf, "%d", &value) != 1)
+		return -EINVAL;
+	if ((value > MAX_ACM_INSTANCES) || (value < 1)) {
+		USB_DEBUG("WARNING: invalid ACM instance count: %d", value);
+		value = MAX_ACM_INSTANCES;
+	}
+	config->instances = value;
+	return size;
+}
+
+static DEVICE_ATTR(instances, S_IRUGO | S_IWUSR, acm_instances_show,
+						 acm_instances_store);
+static struct device_attribute *acm_function_attributes[] = {
+	&dev_attr_instances,
+	NULL
+};
+
+static struct android_usb_function acm_function = {
+	.name		= "acm",
+	.init		= acm_function_init,
+	.cleanup	= acm_function_cleanup,
+	.bind_config	= acm_function_bind_config,
+	.attributes	= acm_function_attributes,
+};
+
+
+static int
+serial_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	USB_DEBUG("SERIAL INIT");
+	USBSTACK_DBG("%s", __func__);
+	f->config = kzalloc(sizeof(struct acm_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+	((struct acm_function_config *)(f->config))->instances =1;
+	return gserial_setup(cdev->gadget, MAX_SERIAL_INSTANCES);
+}
+
+
+static ssize_t serial_instances_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+    struct acm_function_config *config = f->config;
+    if(NULL == config)
+    {
+        return -EINVAL;
+    }
+    USBSTACK_DBG("%s", __func__);
+	return sprintf(buf, "%d\n", config->instances);
+}
+
+static ssize_t serial_instances_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+    struct acm_function_config *config = f->config;
+    if(NULL == config)
+    {
+        return -EINVAL;
+    }
+	int value;
+	USBSTACK_DBG("%s", __func__);
+	if (sscanf(buf, "%d", &value) != 1)
+		return -EINVAL;
+	if ((value > MAX_ACM_INSTANCES) || (value < 1)) {
+		USB_DEBUG("WARNING: invalid serial instance count: %d", value);
+		value = MAX_ACM_INSTANCES;
+	}
+	config->instances = value;
+	return size;
+}
+
+static DEVICE_ATTR(instances_serial, S_IRUGO | S_IWUSR, serial_instances_show,
+						 serial_instances_store);
+static struct device_attribute *serial_function_attributes[] = {
+	&dev_attr_instances_serial,
+	NULL
+};
+
+
+static struct android_usb_function serial_function = {
+	.name		= "serial",
+	.init		= serial_function_init,
+	.cleanup	= acm_function_cleanup,
+	.bind_config	= serial_function_bind_config,
+	.attributes	= serial_function_attributes,
+};
+
+
+#if 0
+static int
+mtp_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	return mtp_setup();
+}
+
+static void mtp_function_cleanup(struct android_usb_function *f)
+{
+	mtp_cleanup();
+}
+
+static int
+mtp_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	return mtp_bind_config(c, false);
+}
+
+static int
+ptp_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	/* nothing to do - initialization is handled by mtp_function_init */
+	return 0;
+}
+
+static void ptp_function_cleanup(struct android_usb_function *f)
+{
+	/* nothing to do - cleanup is handled by mtp_function_cleanup */
+}
+
+static int
+ptp_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	return mtp_bind_config(c, true);
+}
+
+static int mtp_function_ctrlrequest(struct android_usb_function *f,
+					struct usb_composite_dev *cdev,
+					const struct usb_ctrlrequest *c)
+{
+	return mtp_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function mtp_function = {
+	.name		= "mtp",
+	.init		= mtp_function_init,
+	.cleanup	= mtp_function_cleanup,
+	.bind_config	= mtp_function_bind_config,
+	.ctrlrequest	= mtp_function_ctrlrequest,
+};
+
+/* PTP function is same as MTP with slightly different interface descriptor */
+static struct android_usb_function ptp_function = {
+	.name		= "ptp",
+	.init		= ptp_function_init,
+	.cleanup	= ptp_function_cleanup,
+	.bind_config	= ptp_function_bind_config,
+};
+#endif
+
+struct rndis_function_config {
+	u8      ethaddr[ETH_ALEN];
+	u32     vendorID;
+	char	manufacturer[256];
+	/* "Wireless" RNDIS; auto-detected by Windows */
+	bool	wceis;
+};
+
+static int
+rndis_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	USBSTACK_DBG("%s", __func__);
+	f->config = kzalloc(sizeof(struct rndis_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+	return 0;
+}
+
+static void rndis_function_cleanup(struct android_usb_function *f)
+{
+	USBSTACK_DBG("%s", __func__);
+	kfree(f->config);
+	f->config = NULL;
+}
+
+extern unsigned int ecm_setup_work_time;
+static int
+rndis_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	int ret;
+	struct rndis_function_config *rndis = f->config;
+
+	if(ecm_setup_work_time == 1){
+		ecm_setup_work_time = 0;
+		ecm_work_run_cnt = 0;
+	}
+	if (!rndis) {
+		pr_err("%s: rndis_pdata\n", __func__);
+		return -1;
+	}
+	USBSTACK_DBG("%s", __func__);
+
+	pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+		rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+		rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+
+	ret = gether_setup_name(c->cdev->gadget, rndis->ethaddr, USB_VNIC_NAME);
+	if (ret) {
+		printk("%s: gether_setup_name failed, ret:%d", __func__, ret);
+		return ret;
+	}
+
+	/* not used by the application layer */
+	if (rndis->wceis) {
+		/* "Wireless" RNDIS; auto-detected by Windows */
+		rndis_iad_descriptor.bFunctionClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_iad_descriptor.bFunctionSubClass = 0x01;
+		rndis_iad_descriptor.bFunctionProtocol = 0x03;
+		rndis_control_intf.bInterfaceClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_control_intf.bInterfaceSubClass =	 0x01;
+		rndis_control_intf.bInterfaceProtocol =	 0x03;
+	}
+	return rndis_bind_config_vendor(c, rndis->ethaddr, rndis->vendorID, rndis->manufacturer);
+}
+
+static void rndis_function_unbind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	USBSTACK_DBG("%s", __func__);
+	gether_cleanup();
+}
+
+static ssize_t rndis_manufacturer_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+	struct rndis_function_config *config = f->config;
+    if(NULL == config)
+    {
+        return -EINVAL;
+    }    
+	return sprintf(buf, "%s\n", config->manufacturer);
+}
+
+static ssize_t rndis_manufacturer_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+	struct rndis_function_config *config = f->config;
+    if(NULL == config)
+    {
+        return -EINVAL;
+    }
+	if (size >= sizeof(config->manufacturer))
+		return -EINVAL;
+	if (sscanf(buf, "%s", config->manufacturer) == 1)
+		return size;
+	return -1;
+}
+
+static DEVICE_ATTR(manufacturer, S_IRUGO | S_IWUSR, rndis_manufacturer_show,
+						    rndis_manufacturer_store);
+
+static ssize_t rndis_wceis_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+	struct rndis_function_config *config = f->config;
+    if(NULL == config)
+    {
+        return -EINVAL;
+    }
+
+	return sprintf(buf, "%d\n", config->wceis);
+}
+
+static ssize_t rndis_wceis_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+	struct rndis_function_config *config = f->config;
+    if(NULL == config)
+    {
+        return -EINVAL;
+    }
+	int value;
+
+	if (sscanf(buf, "%d", &value) == 1) {
+		config->wceis = value;
+		return size;
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(wceis, S_IRUGO | S_IWUSR, rndis_wceis_show,
+					     rndis_wceis_store);
+
+static ssize_t rndis_ethaddr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+	struct rndis_function_config *rndis = f->config;
+    if(NULL == rndis)
+    {
+        return -EINVAL;
+    }
+
+	return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+		rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+}
+
+static ssize_t rndis_ethaddr_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *rndis;
+	unsigned int mac[ETH_ALEN];
+	int i;
+
+	if (NULL == f || NULL == f->config)
+		return -EINVAL;
+	rndis = f->config;
+
+	/* parse into temporaries: scanning %x directly through a u8 pointer
+	 * would overwrite the neighbouring bytes */
+	if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		    &mac[0], &mac[1], &mac[2],
+		    &mac[3], &mac[4], &mac[5]) == 6) {
+		for (i = 0; i < ETH_ALEN; i++)
+			rndis->ethaddr[i] = (u8)mac[i];
+		return size;
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(ethaddr, S_IRUGO | S_IWUSR, rndis_ethaddr_show,
+					       rndis_ethaddr_store);
+
+static ssize_t rndis_vendorID_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+	struct rndis_function_config *config = f->config;
+    if(NULL == config)
+    {
+        return -EINVAL;
+    }
+	return sprintf(buf, "%04x\n", config->vendorID);
+}
+
+static ssize_t rndis_vendorID_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+
+	struct rndis_function_config *config = f->config;
+    if(NULL == config)
+    {
+        return -EINVAL;
+    }
+	int value;
+
+	if (sscanf(buf, "%04x", (unsigned int *)(&value)) == 1) {
+		config->vendorID = value;
+		return size;
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(vendorID, S_IRUGO | S_IWUSR, rndis_vendorID_show,
+						rndis_vendorID_store);
+
+
+extern unsigned long long g_test_xmit_pktnum;
+extern unsigned long long g_test_xmit_pkterr1;
+extern unsigned long long g_test_xmit_pkterr2;
+extern unsigned long long g_test_rx_pkt ;
+extern unsigned long long g_test_rx_complt_pkt;
+
+static ssize_t rndis_pktNum_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "pktnum:%04x, tx_total:%lld, tx_err1:%lld, tx_err2:%lld, rx:%lld, rx_cpl:%lld\n", multi_packet_num,
+		g_test_xmit_pktnum, g_test_xmit_pkterr1, g_test_xmit_pkterr2,
+		g_test_rx_pkt,g_test_rx_complt_pkt);
+}
+
+static ssize_t rndis_pktNum_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int value;
+
+	if (sscanf(buf, "%d", &value) != 1)
+		return -EINVAL;
+	if ((value > RNDIS_PACKET_MAX_NUM) || (value < 1)) {
+		USB_DEBUG("WARNING: invalid packet num: %d", value);
+		value = RNDIS_PACKET_MAX_NUM;
+	}
+	multi_packet_num = value;
+	return size;
+}
+static DEVICE_ATTR(pktNum, S_IRUGO | S_IWUSR, rndis_pktNum_show,
+						rndis_pktNum_store);
+
+static ssize_t ether_skbNum_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "skbnum:%04x\n", ether_skb_num);
+}
+
+static ssize_t ether_skbNum_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int value;
+
+	if (sscanf(buf, "%d", &value) != 1)
+		return -EINVAL;
+	if ((value > 512) || (value < 5)) {
+		USB_DEBUG("WARNING: invalid skb num: %d", value);
+		value = 512;
+	}
+	ether_skb_num = value;
+	return size;
+}
+
+static DEVICE_ATTR(skbNum, S_IRUGO | S_IWUSR, ether_skbNum_show,
+						ether_skbNum_store);
+
+static struct device_attribute *rndis_function_attributes[] = {
+	&dev_attr_manufacturer,
+	&dev_attr_wceis,
+	&dev_attr_ethaddr,
+	&dev_attr_vendorID,
+	&dev_attr_pktNum,
+	&dev_attr_skbNum,
+	NULL
+};
+
+static struct android_usb_function rndis_function = {
+	.name		= "rndis",
+	.init		= rndis_function_init,
+	.cleanup	= rndis_function_cleanup,
+	.bind_config	= rndis_function_bind_config,
+	.unbind_config	= rndis_function_unbind_config,
+	.attributes	= rndis_function_attributes,
+};
+
+
+
+struct mbim_function_config {
+	u8      ethaddr[ETH_ALEN];
+	u32     vendorID;
+	char	manufacturer[256];
+	bool	wceis;
+};
+
+static int mbim_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	int ret = 0;
+
+	printk("mbim_function_init\n");
+	USBSTACK_DBG("%s", __func__);
+	f->config = kzalloc(sizeof(struct mbim_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+#if 1
+	ret = mbim_conn_chanel_init() ;
+
+    if(ret)
+    {
+        kfree(f->config) ;
+        f->config = NULL ;
+        return ret ;
+    }
+#endif
+	return 0;
+}
+
+static void mbim_function_cleanup(struct android_usb_function *f)
+{
+	USBSTACK_DBG("%s", __func__);
+	kfree(f->config);
+	f->config = NULL;
+}
+
+//extern unsigned int ecm_setup_work_time;
+static int mbim_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	int ret;
+	struct mbim_function_config *mbim = f->config;
+
+	printk("mbim_function_bind_config\n");
+
+	if (!mbim) {
+		pr_err("%s: mbim_pdata\n", __func__);
+		return -1;
+	}
+	USBSTACK_DBG("%s", __func__);
+
+	pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+		mbim->ethaddr[0], mbim->ethaddr[1], mbim->ethaddr[2],
+		mbim->ethaddr[3], mbim->ethaddr[4], mbim->ethaddr[5]);
+
+	ret = mbim_bind_config(c, mbim->ethaddr); 
+	if (ret) {
+		printk("%s: mbim_bind_config failed, ret:%d", __func__, ret);
+		return ret;
+	}
+	return gether_setup_name(c->cdev->gadget, mbim->ethaddr, USB_VNIC_NAME);
+}
+
+static void mbim_function_unbind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	USBSTACK_DBG("%s", __func__);
+	gether_cleanup();
+}
+
+#if 0
+static int mbim_function_ctrlrequest(struct android_usb_function * f,
+                    struct usb_composite_dev *dev,
+                    const struct usb_ctrlrequest * ctrl)
+{
+    int value = -1;
+    struct mbim_function_config *mbim = f->config;
+#if 1	
+    switch (ctrl->bRequestType & USB_TYPE_MASK){
+    case USB_TYPE_STANDARD:
+        if(ctrl->bRequest == USB_REQ_SET_CONFIGURATION){
+            if((!mbim->receive_setIfac)&&(!mbim->receive_setICfg)){
+                printk("mbim-receive_setICfg\n");
+                //schedule_delayed_work(&ecm->work, msecs_to_jiffies(ECM_TIMEOUT));
+            }           
+            if((mbim->receive_setIfac)&&(!mbim->receive_setICfg))
+                gether_mbim_uevent(mbim->instances, 1);
+            mbim->receive_setICfg = 1;
+        }else if(ctrl->bRequest == USB_REQ_SET_INTERFACE){
+            u8  intf = (le16_to_cpu(ctrl->wIndex)) & 0xFF;
+            struct usb_function *f_intf = dev->config->interface[intf];
+            struct f_mbim       *f_mbim = func_to_mbim(f_intf);
+                printk("ecm-receive_setinterface\n");
+            if (intf != f_mbim->data_id)
+                return value;
+            if(mbim->receive_setICfg){
+                //USBSTACK_DBG("ecm-delayed-work cancel");
+                //cancel_delayed_work_sync(&ecm->work);
+                gether_mbim_uevent(mbim->instances, 1);
+            }
+            mbim->receive_setIfac = 1;
+        }
+        break;
+    case USB_TYPE_VENDOR:
+        if((ctrl->bRequest == bMS_Code_Original )||
+            (ctrl->bRequest == bMS_Code_Change )){
+                printk("mbim-sys-id-err");
+                schedule_delayed_work(&mbim->work, msecs_to_jiffies(50));
+        }
+        break;  
+    case USB_TYPE_CLASS:
+        if(ctrl->bRequest == USB_CDC_GET_NTB_PARAMETERS){
+        value = w_length > sizeof mbim_ntb_parameters ?
+            sizeof mbim_ntb_parameters : w_length;
+        memcpy(req->buf, &mbim_ntb_parameters, value);
+        //USBSTACK_DBG(cdev, "Host asked NTB parameters\n");
+        break;
+        }
+    }
+#endif
+    return value;
+}
+#endif
+
+
+#if 0
+static ssize_t mbim_manufacturer_show(struct device *dev,
+        struct device_attribute *attr, char *buf)
+{
+    struct android_usb_function *f = dev_get_drvdata(dev);
+    struct rndis_function_config *config = f->config;
+    return sprintf(buf, "%s\n", config->manufacturer);
+}
+
+static ssize_t mbim_manufacturer_store(struct device *dev,
+        struct device_attribute *attr, const char *buf, size_t size)
+{
+    struct android_usb_function *f = dev_get_drvdata(dev);
+    struct mbim_function_config *config = f->config;
+
+    if (size >= sizeof(config->manufacturer))
+        return -EINVAL;
+    if (sscanf(buf, "%s", config->manufacturer) == 1)
+        return size;
+    return -1;
+}
+
+static DEVICE_ATTR(manufacturer, S_IRUGO | S_IWUSR, mbim_manufacturer_show,
+                            mbim_manufacturer_store);
+
+static ssize_t mbim_wceis_show(struct device *dev,
+        struct device_attribute *attr, char *buf)
+{
+    struct android_usb_function *f = dev_get_drvdata(dev);
+    struct mbim_function_config *config = f->config;
+    return sprintf(buf, "%d\n", config->wceis);
+}
+static ssize_t mbim_wceis_store(struct device *dev,
+        struct device_attribute *attr, const char *buf, size_t size)
+{
+    struct android_usb_function *f = dev_get_drvdata(dev);
+    struct mbim_function_config *config = f->config;
+    int value;
+
+    if (sscanf(buf, "%d", &value) == 1) {
+        config->wceis = value;
+        return size;
+    }
+    return -EINVAL;
+}
+
+static DEVICE_ATTR(wceis, S_IRUGO | S_IWUSR, mbim_wceis_show,
+                         mbim_wceis_store);
+
+static ssize_t mbim_ethaddr_show(struct device *dev,
+        struct device_attribute *attr, char *buf)
+{
+    struct android_usb_function *f = dev_get_drvdata(dev);
+    struct mbim_function_config *mbim = f->config;
+    return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+        mbim->ethaddr[0], mbim->ethaddr[1], mbim->ethaddr[2],
+        mbim->ethaddr[3], mbim->ethaddr[4], mbim->ethaddr[5]);
+}
+
+static ssize_t mbim_ethaddr_store(struct device *dev,
+        struct device_attribute *attr, const char *buf, size_t size)
+{
+    struct android_usb_function *f = dev_get_drvdata(dev);
+    struct mbim_function_config *mbim = f->config;
+
+    if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+            (unsigned int *)&mbim->ethaddr[0], (unsigned int *)&mbim->ethaddr[1],
+            (unsigned int *)&mbim->ethaddr[2], (unsigned int *)&mbim->ethaddr[3],
+            (unsigned int *)&mbim->ethaddr[4], (unsigned int *)&mbim->ethaddr[5]) == 6)
+        return size;
+    return -EINVAL;
+}
+
+static DEVICE_ATTR(ethaddr, S_IRUGO | S_IWUSR, mbim_ethaddr_show,
+                           mbim_ethaddr_store);
+
+static ssize_t mbim_vendorID_show(struct device *dev,
+        struct device_attribute *attr, char *buf)
+{
+    struct android_usb_function *f = dev_get_drvdata(dev);
+    struct mbim_function_config *config = f->config;
+    return sprintf(buf, "%04x\n", config->vendorID);
+}
+
+static ssize_t mbim_vendorID_store(struct device *dev,
+        struct device_attribute *attr, const char *buf, size_t size)
+{
+    struct android_usb_function *f = dev_get_drvdata(dev);
+    struct mbim_function_config *config = f->config;
+    int value;
+
+    if (sscanf(buf, "%04x", (unsigned int *)(&value)) == 1) {
+        config->vendorID = value;
+        return size;
+    }
+    return -EINVAL;
+}
+
+static DEVICE_ATTR(vendorID, S_IRUGO | S_IWUSR, mbim_vendorID_show,
+                        mbim_vendorID_store);
+
+
+extern unsigned long long g_test_xmit_pktnum;
+extern unsigned long long g_test_xmit_pkterr1;
+extern unsigned long long g_test_xmit_pkterr2;
+extern unsigned long long g_test_rx_pkt ;
+extern unsigned long long g_test_rx_complt_pkt;
+
+static DEVICE_ATTR(skbNum, S_IRUGO | S_IWUSR, ether_skbNum_show,
+                        ether_skbNum_store);
+#endif
+
+static ssize_t mbim_pktNum_show(struct device *dev,
+        struct device_attribute *attr, char *buf)
+{
+    return sprintf(buf, "pktnum:%04x, tx_total:%lld, tx_err1:%lld, tx_err2:%lld, rx:%lld, rx_cpl:%lld\n", multi_packet_num,
+        g_test_xmit_pktnum, g_test_xmit_pkterr1, g_test_xmit_pkterr2,
+        g_test_rx_pkt,g_test_rx_complt_pkt);
+}
+
+static ssize_t mbim_pktNum_store(struct device *dev,
+        struct device_attribute *attr, const char *buf, size_t size)
+{
+    int value;
+
+    if (sscanf(buf, "%d", &value) != 1)
+        return -EINVAL;
+    if ((value > MBIM_PACKET_MAX_NUM) || (value < 1)){
+        USB_DEBUG("WARNING: invalid packet num: %d", value);
+        value = MBIM_PACKET_MAX_NUM;
+    }
+    multi_packet_num = value;
+    return size;
+}
+static DEVICE_ATTR(mbimPktNum, S_IRUGO | S_IWUSR, mbim_pktNum_show,
+                                mbim_pktNum_store);
+
+static struct device_attribute *mbim_function_attributes[] = {
+    //&dev_attr_mbim,
+    //&dev_attr_wceis,
+    //&dev_attr_ethaddr,
+    //&dev_attr_vendorID,
+    &dev_attr_mbimPktNum,
+    //&dev_attr_skbNum,
+    NULL,
+};
+
+static struct android_usb_function mbim_function = {
+	.name		= "mbim",
+	.init		= mbim_function_init,
+	.cleanup	= mbim_function_cleanup,
+	.bind_config	= mbim_function_bind_config,
+	.unbind_config	= mbim_function_unbind_config,
+	.attributes	= mbim_function_attributes,
+	//.ctrlrequest	= mbim_function_ctrlrequest,
+};
+
+
+
+struct mass_storage_function_config {
+	struct fsg_config fsg;
+	struct fsg_common *common;
+	int 	fsg_mods_init;
+};
+
+
+
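+/*
+ * Microsoft OS string descriptor: bLength 0x12, bDescriptorType STRING,
+ * the UTF-16LE signature "MSFT100", the vendor code (0x04) and a pad byte.
+ */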
+static char OS_str_des[] = 
+{	0x12, 0x03, 0x4D, 0x00, 0x53, 0x00, 0x46, 0x00, 
+	0x54, 0x00, 0x31, 0x00, 0x30, 0x00, 0x30, 0x00, 
+	0x04, 0x00 
+};
+
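+/*
+ * Microsoft extended compat-ID descriptor with a single function entry and
+ * an empty compatible ID, returned for the OS feature request so that no
+ * OS-specific driver is matched.
+ */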
+static char  OS_ext_cfg_des_null[] =
+{
+	0x28,0x00,0x00,0x00,0x00,0x01,0x04,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+	0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+};
+
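+/*
+ * Handle the Microsoft OS descriptor handshake used by the driver CD-ROM
+ * ("mods") mode: answer the OS string descriptor request, reply to the
+ * vendor-specific feature requests with the empty compat-ID descriptor
+ * above, and activate mods once the host issues Get Max LUN.
+ */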
+static int mass_storage_function_ctrlrequest(struct android_usb_function * f,
+					struct usb_composite_dev *dev,
+					const struct usb_ctrlrequest * ctrl)
+{
+	int	value = -1;
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+	
+	struct mass_storage_function_config *config = f->config;
+	struct usb_request	*req = dev->req;
+
+	/* Not a CD-ROM-only configuration: skip host OS identification */
+	if (!config->fsg_mods_init) {
+		/* everything below is mods-related; if mods was not
+		 * initialized, return immediately */
+		return value;
+	}
+
+	switch (ctrl->bRequestType & USB_TYPE_MASK){
+
+	case USB_TYPE_STANDARD:
+		if(ctrl->bRequest == USB_REQ_GET_DESCRIPTOR){
+			if(((w_value>>8) == USB_DT_STRING)&&((w_value & 0xFF) == OS_string_descriptor_id)){
+				USBSTACK_DBG("Get Sys des");
+				printk("Get Sys des\n");
+				value = min_t(u16, w_length, sizeof(OS_str_des));
+				memcpy(req->buf, OS_str_des, value);
+				req->length = value;
+				req->zero = value < w_length;
+				
+				value = usb_ep_queue(dev->gadget->ep0, req, GFP_ATOMIC);
+				if (value < 0) {
+					USBSTACK_DBG("ep_queue --> %d", value);
+					req->status = 0;
+					composite_setup_complete(dev->gadget->ep0, req);
+				}
+			}
+		}else if(ctrl->bRequest == USB_REQ_SET_CONFIGURATION){
+		//	usb_mods_activate();
+		}
+		break;
+
+	case USB_TYPE_CLASS:
+		if(ctrl->bRequest == US_BULK_GET_MAX_LUN){
+			usb_mods_activate();
+		}
+		break;
+	case USB_TYPE_VENDOR:
+		if(ctrl->bRequest == bMS_Code_Original ){
+			USBSTACK_DBG("os string bms org code");
+			printk("os string bms org code\n");
+#if ((defined CONFIG_ARCH_ZX297520V3_MIFI)||(defined CONFIG_ARCH_ZX297520V3_UFI)) && (defined CONFIG_MIN_VERSION)
+			usb_set_ms_auto_reject(1);
+#else
+			usb_set_ms_auto_reject(0);
+#endif
+		}else if(ctrl->bRequest == bMS_Code_Change){
+			USBSTACK_DBG("os string bms changed code");
+			printk("os string bms changed code\n");
+			usb_set_ms_auto_reject(1);
+		}
+		if((ctrl->bRequest == bMS_Code_Original )||
+			(ctrl->bRequest == bMS_Code_Change )){
+			usb_set_sys_id(0);
+			value = min_t(u16, w_length, sizeof(OS_ext_cfg_des_null));
+			memcpy(req->buf, &OS_ext_cfg_des_null[0], value);
+			req->length = value;
+			req->zero = value < w_length;
+			value = usb_ep_queue(dev->gadget->ep0, req, GFP_ATOMIC);
+			if (value < 0) {
+				USBSTACK_DBG("ep_queue --> %d", value);
+				req->status = 0;
+				composite_setup_complete(dev->gadget->ep0, req);
+			}
+		}
+		break;
+	}
+		
+	return value;
+}
+
+
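+/*
+ * LUN 0 is exposed as a removable, read-only CD-ROM; with CFG_LUN_NUM_TWO a
+ * second writable, removable LUN (U-disk) is added as well.
+ */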
+static int mass_storage_function_init(struct android_usb_function *f,
+					struct usb_composite_dev *cdev)
+{
+	struct mass_storage_function_config *config;
+	struct fsg_common *common;
+	int err;
+	USBSTACK_DBG("%s", __func__);
+	config = kzalloc(sizeof(struct mass_storage_function_config),
+								GFP_KERNEL);
+	if (!config)
+		return -ENOMEM;
+
+	/* config cdrom */
+	config->fsg.nluns = 1;
+	config->fsg.luns[0].removable = 1;
+	config->fsg.luns[0].cdrom = 1;
+	config->fsg.luns[0].ro = 1;
+
+#if CFG_LUN_NUM_TWO
+	/* config Udisk */
+	config->fsg.nluns = 2;
+	config->fsg.luns[1].removable = 1;
+	config->fsg.luns[1].cdrom = 0;
+	config->fsg.luns[1].ro = 0;
+#endif
+
+	common = fsg_common_init(NULL, cdev, &config->fsg);
+	if (IS_ERR(common)) {
+		kfree(config);
+		return PTR_ERR(common);
+	}
+
+	err = sysfs_create_link(&f->dev->kobj,
+				&common->luns[0].dev.kobj,
+				"lun");
+	if (err) {
+		kfree(config);
+        kfree(common);
+		return err;
+	}
+
+	config->common = common;
+	f->config = config;
+
+	return 0;
+}
+
+static void mass_storage_function_cleanup(struct android_usb_function *f)
+{
+    struct mass_storage_function_config *config;
+	USBSTACK_DBG("%s", __func__);
+	/* added by gsn: the file-storage thread must be stopped here */
+	config = (struct mass_storage_function_config *)f->config;
+	if(config != NULL){
+		fsg_common_release(&(config->common->ref));
+	}
+	kfree(f->config);
+	f->config = NULL;
+}
+
+static int mass_storage_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct mass_storage_function_config *config = f->config;
+	USBSTACK_DBG("%s", __func__);
+	//usb_set_ms_auto_eject(1);//  auto-reject cdrom 
+	if(_android_dev->cdrom_only){
+		usb_mods_init();
+		config->fsg_mods_init = 1;
+	}
+	return fsg_bind_config(c->cdev, c, config->common);
+}
+static void mass_storage_function_unbind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct mass_storage_function_config *config = f->config;
+	USBSTACK_DBG("%s", __func__);
+	if(config->fsg_mods_init){
+		usb_mods_exit();
+		config->fsg_mods_init = 0;		
+	}
+}
+
+#if CFG_LUN_NUM_TWO
+
+static ssize_t mass_storage_nluns_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+	struct mass_storage_function_config *config = f->config;
+    if(NULL == config)
+    {
+        return -EINVAL;
+    }
+
+	USBSTACK_DBG("fsg nluns_show: %d", config->common->nluns);
+	
+	return sprintf(buf, "%d\n", config->common->nluns);
+}
+
+static ssize_t mass_storage_nluns_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+	struct mass_storage_function_config *config = f->config;
+    if(NULL == config)
+    {
+        return -EINVAL;
+    }
+
+	int value;
+	if (sscanf(buf, "%d", &value) == 1) {
+		if ((value > 2) || (value < 1)) {
+			USB_DEBUG("WARNING: invalid LUN count: %d", value);
+			value = 2;
+		}
+
+		config->common->nluns = value;
+
+		USBSTACK_DBG("fsg nluns_store: %d", value);
+		return size;
+	}
+	return -1;
+}
+
+static DEVICE_ATTR(nluns, S_IRUGO | S_IWUSR,
+					mass_storage_nluns_show,
+					mass_storage_nluns_store);
+
+#endif
+
+static ssize_t mass_storage_inquiry_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }
+	struct mass_storage_function_config *config = f->config;
+    if(NULL == config)
+    {
+        return -EINVAL;
+    }
+
+	return sprintf(buf, "%s\n", config->common->inquiry_string);
+}
+
+static ssize_t mass_storage_inquiry_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+    if(NULL == f)
+    {
+        return -EINVAL;
+    }	
+    struct mass_storage_function_config *config = f->config;
+    if(NULL == config)
+    {
+        return -EINVAL;
+    }
+    if (size >= sizeof(config->common->inquiry_string))
+		return -EINVAL;
+
+	/* sscanf stops at whitespace, so memcpy is used instead */
+#if 1
+	memset(config->common->inquiry_string, 0, sizeof(config->common->inquiry_string));
+	memcpy(config->common->inquiry_string, buf, size);
+#else
+	if (sscanf(buf, "%s", config->common->inquiry_string) != 1)
+		return -EINVAL;
+#endif
+	return size;
+}
+
+static DEVICE_ATTR(inquiry_string, S_IRUGO | S_IWUSR,
+					mass_storage_inquiry_show,
+					mass_storage_inquiry_store);
+
+static struct device_attribute *mass_storage_function_attributes[] = {
+	//&dev_attr_inquiry_string,
+#if CFG_LUN_NUM_TWO
+	&dev_attr_nluns,
+#endif
+	NULL
+};
+
+static struct android_usb_function mass_storage_function = {
+	.name		= "mass_storage",
+	.init		= mass_storage_function_init,
+	.cleanup	= mass_storage_function_cleanup,
+	.bind_config	= mass_storage_function_bind_config,
+	.unbind_config	 = mass_storage_function_unbind_config,
+	.attributes	= mass_storage_function_attributes,
+	.ctrlrequest	= mass_storage_function_ctrlrequest,
+};
+
+
+#if 0
+static int accessory_function_init(struct android_usb_function *f,
+					struct usb_composite_dev *cdev)
+{
+	return acc_setup();
+}
+
+static void accessory_function_cleanup(struct android_usb_function *f)
+{
+	acc_cleanup();
+}
+
+static int accessory_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	return acc_bind_config(c);
+}
+
+static int accessory_function_ctrlrequest(struct android_usb_function *f,
+						struct usb_composite_dev *cdev,
+						const struct usb_ctrlrequest *c)
+{
+	return acc_ctrlrequest(cdev, c);
+}
+
+
+static struct android_usb_function accessory_function = {
+	.name		= "accessory",
+	.init		= accessory_function_init,
+	.cleanup	= accessory_function_cleanup,
+	.bind_config	= accessory_function_bind_config,
+	.ctrlrequest	= accessory_function_ctrlrequest,
+};
+
+static int audio_source_function_init(struct android_usb_function *f,
+			struct usb_composite_dev *cdev)
+{
+	struct audio_source_config *config;
+
+	config = kzalloc(sizeof(struct audio_source_config), GFP_KERNEL);
+	if (!config)
+		return -ENOMEM;
+	config->card = -1;
+	config->device = -1;
+	f->config = config;
+	return 0;
+}
+
+static void audio_source_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+}
+
+static int audio_source_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct audio_source_config *config = f->config;
+
+	return audio_source_bind_config(c, config);
+}
+
+static void audio_source_function_unbind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct audio_source_config *config = f->config;
+
+	config->card = -1;
+	config->device = -1;
+}
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct audio_source_config *config = f->config;
+
+	/* print PCM card and device numbers */
+	return sprintf(buf, "%d %d\n", config->card, config->device);
+}
+
+static DEVICE_ATTR(pcm, S_IRUGO | S_IWUSR, audio_source_pcm_show, NULL);
+
+static struct device_attribute *audio_source_function_attributes[] = {
+	&dev_attr_pcm,
+	NULL
+};
+
+static struct android_usb_function audio_source_function = {
+	.name		= "audio_source",
+	.init		= audio_source_function_init,
+	.cleanup	= audio_source_function_cleanup,
+	.bind_config	= audio_source_function_bind_config,
+	.unbind_config	= audio_source_function_unbind_config,
+	.attributes	= audio_source_function_attributes,
+};
+#endif
+
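+/* Functions selectable through the "functions" sysfs attribute. */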
+static struct android_usb_function *supported_functions[] = {
+	//&ffs_function,
+	&ecm_function,
+	&diag_function,
+	&adb_function,
+	&acm_function,
+	&serial_function,
+//	&mtp_function,
+//	&ptp_function,
+	&rndis_function,
+	&mass_storage_function,
+    &mbim_function,
+//	&accessory_function,
+//	&audio_source_function,
+	NULL
+};
+
+
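+/*
+ * Create an "f_<name>" device and its sysfs attribute files for every
+ * supported function, and run each function's init hook.
+ */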
+static int android_init_functions(struct android_usb_function **functions,
+				  struct usb_composite_dev *cdev)
+{
+	struct android_dev *dev = _android_dev;
+	struct android_usb_function *f;
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+	int err =0;
+	int index = 0;
+	USBSTACK_DBG("%s", __func__);
+	for (; (f = *functions++); index++) {
+		f->dev_name = kasprintf(GFP_KERNEL, "f_%s", f->name);
+		if (f->dev_name == NULL)
+			return -ENOMEM;
+		f->dev = device_create(android_class, dev->dev,
+				MKDEV(0, index), f, f->dev_name);
+		if (IS_ERR(f->dev)) {
+			pr_err("%s: Failed to create dev %s", __func__,
+							f->dev_name);
+			err = PTR_ERR(f->dev);
+			goto err_create;
+		}
+
+		if (f->init) {
+			err = f->init(f, cdev);
+			if (err) {
+				pr_err("%s: Failed to init %s", __func__,
+								f->name);
+				goto err_out;
+			}
+		}
+
+		attrs = f->attributes;
+		if (attrs) {
+			while ((attr = *attrs++) && !err)
+				err = device_create_file(f->dev, attr);
+		}
+		if (err) {
+			pr_err("%s: Failed to create function %s attributes",
+					__func__, f->name);
+			goto err_out;
+		}
+	}
+	return 0;
+
+err_out:
+	device_destroy(android_class, f->dev->devt);
+err_create:
+	kfree(f->dev_name);
+	return err;
+}
+
+static void android_cleanup_functions(struct android_usb_function **functions)
+{
+	struct android_usb_function *f;
+	USBSTACK_DBG("%s", __func__);
+	while (*functions) {
+		f = *functions++;
+
+		if (f->dev) {
+			device_destroy(android_class, f->dev->devt);
+			kfree(f->dev_name);
+		}
+
+		if (f->cleanup)
+			f->cleanup(f);
+	}
+}
+
+static int
+android_bind_enabled_functions(struct android_dev *dev,
+			       struct usb_configuration *c)
+{
+	struct android_usb_function *f;
+	int ret;
+	USBSTACK_DBG("%s", __func__);
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+		USBSTACK_DBG("%s, %u %s", __func__, __LINE__, f->name);
+		ret = f->bind_config(f, c);
+		if (ret) {
+			USBSTACK_DBG("%s: %s failed", __func__, f->name);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static void
+android_unbind_enabled_functions(struct android_dev *dev,
+			       struct usb_configuration *c)
+{
+	struct android_usb_function *f;
+	USBSTACK_DBG("%s", __func__);
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+		if (f->unbind_config)
+			f->unbind_config(f, c);
+	}
+}
+
+static int android_enable_function(struct android_dev *dev, char *name)
+{
+	struct android_usb_function **functions = dev->functions;
+	struct android_usb_function *f;
+
+	while ((f = *functions++)) {
+		if (!strcmp(name, f->name)) {
+			USBSTACK_DBG("%s--%s", __func__ ,  f->name );
+			list_add_tail(&f->enabled_list,
+						&dev->enabled_functions);
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+/*-------------------------------------------------------------------------*/
+/* /sys/class/android_usb/android%d/ interface */
+
+static ssize_t
+functions_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+    if(NULL == dev)
+    {
+        return -EINVAL;
+    }
+    struct android_usb_function *f;
+	char *buff = buf;
+	USBSTACK_DBG("%s", __func__);
+	mutex_lock(&dev->mutex);
+
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list)
+		buff += sprintf(buff, "%s,", f->name);
+
+	mutex_unlock(&dev->mutex);
+    USBSTACK_DBG("%s, %u", __func__, __LINE__);
+	if (buff != buf)
+		*(buff-1) = '\n';
+	return buff - buf;
+}
+
+static ssize_t
+functions_store(struct device *pdev, struct device_attribute *attr,
+			       const char *buff, size_t size)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+    if(NULL == dev)
+    {
+        return -EINVAL;
+    }
+    char *name;
+	char buf[256], *b;
+	char aliases[256], *a;
+	int err;
+	int is_ffs;
+	int ffs_enabled = 0;
+	int count = 0;
+	USBSTACK_DBG("%s", __func__);
+	mutex_lock(&dev->mutex);
+
+	if (dev->enabled) {
+		mutex_unlock(&dev->mutex);
+		USBSTACK_DBG("%s, %u", __func__, __LINE__);
+		return -EBUSY;
+	}
+
+	INIT_LIST_HEAD(&dev->enabled_functions);
+
+	strlcpy(buf, buff, sizeof(buf));
+	b = strim(buf);
+
+	dev->cdrom_only = false;
+	while (b) {
+		name = strsep(&b, ",");
+		if (!name)
+			continue;
+
+		is_ffs = 0;
+		strlcpy(aliases, dev->ffs_aliases, sizeof(aliases));
+		a = aliases;
+
+		while (a) {
+			char *alias = strsep(&a, ",");
+			if (alias && !strcmp(name, alias)) {
+				is_ffs = 1;
+				break;
+			}
+		}
+
+		if (is_ffs) {
+			if (ffs_enabled)
+				continue;
+			err = android_enable_function(dev, "ffs");
+			if (err)
+				pr_err("android_usb: Cannot enable ffs (%d)",
+									err);
+			else
+				ffs_enabled = 1;
+			continue;
+		}
+
+		if((++count == 1)&&(strcmp(name, "mass_storage")==0)){
+			dev->cdrom_only = true;
+		}else{
+			dev->cdrom_only = false;
+		}
+	
+		err = android_enable_function(dev, name);
+		if (err){
+			pr_err("android_usb: Cannot enable '%s' (%d)",
+							   name, err);
+			printk(KERN_WARNING "android_usb: Cannot enable %s \n", name);
+		}
+	}
+
+	mutex_unlock(&dev->mutex);
+    USBSTACK_DBG("%s, %u", __func__, __LINE__);
+	return size;
+}
+
+static ssize_t enable_show(struct device *pdev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+    if(NULL == dev)
+    {
+        return -EINVAL;
+    }
+    USBSTACK_DBG("%s", __func__);
+	return sprintf(buf, "%d\n", dev->enabled);
+}
+
+static ssize_t enable_store(struct device *pdev, struct device_attribute *attr,
+			    const char *buff, size_t size)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+    if(NULL == dev)
+    {
+        return -EINVAL;
+    }
+
+	struct usb_composite_dev *cdev = dev->cdev;
+	//struct usb_configuration *c_desc = cdev->config;
+	struct android_usb_function *f;
+	int enabled = 0;
+
+	if (!cdev)
+		return -ENODEV;
+
+    USBSTACK_DBG("%s, %u", __func__, __LINE__);
+	mutex_lock(&dev->mutex);
+
+	sscanf(buff, "%d", &enabled);
+	USBSTACK_DBG("enable_store enable:%d, dev->enabled:%d", enabled, (size_t)dev->enabled);
+	
+	if (enabled && !dev->enabled) {
+		USB_DEBUG("USB ENABLE");
+		USBSTACK_DBG("USB ENABLE");
+
+		/* mark the gadget as self-powered */
+		usb_gadget_set_selfpowered(cdev->gadget);
+		
+		/*
+		 * Update values in composite driver's copy of
+		 * device descriptor.
+		 */
+		cdev->desc.idVendor = device_desc.idVendor;
+		cdev->desc.idProduct = device_desc.idProduct;
+		cdev->desc.bcdDevice = device_desc.bcdDevice;
+		cdev->desc.bDeviceClass = device_desc.bDeviceClass;
+		cdev->desc.bDeviceSubClass = device_desc.bDeviceSubClass;
+		cdev->desc.bDeviceProtocol = device_desc.bDeviceProtocol;
+		//c_desc->iConfiguration= android_config_driver.iConfiguration;
+		list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+			USB_DEBUG("function name:%s", f->name);
+			USBSTACK_DBG("function name:%s", f->name);
+			if (f->enable)
+				f->enable(f);
+#if 0
+			if(!strcmp(f->name,"ecm"))
+			{
+				cdev->desc.bDeviceClass = USB_CLASS_COMM;
+			}
+#endif
+		}
+		android_enable(dev);
+		dev->enabled = true;
+	} else if (!enabled && dev->enabled) {
+		USB_DEBUG("USB DISABLE");
+		USBSTACK_DBG("USB DISABLE");
+		android_disable(dev);
+		list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+			if (f->disable)
+				f->disable(f);
+		}
+		dev->enabled = false;
+		/* clear the self-powered flag */
+		usb_gadget_clear_selfpowered(cdev->gadget);
+	} else {
+		pr_err("android_usb: already %s\n",
+				dev->enabled ? "enabled" : "disabled");
+		USBSTACK_DBG("android_usb: already %s\n",
+				dev->enabled ? "enabled" : "disabled");
+	}
+
+	mutex_unlock(&dev->mutex);
+	USBSTACK_DBG("%s, %u", __func__, __LINE__);
+	return size;
+}
+
+void gadget_disable(void)
+{
+	struct android_dev *dev = _android_dev;
+	struct android_usb_function *f;
+	struct usb_composite_dev *cdev = dev->cdev;
+	
+	USB_DEBUG("USB DISABLE");
+	USBSTACK_DBG("USB DISABLE");
+	android_disable(dev);
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+		if (f->disable)
+			f->disable(f);
+	}
+	dev->enabled = false;
+	/* clear the self-powered flag */
+	usb_gadget_clear_selfpowered(cdev->gadget);
+
+}
+EXPORT_SYMBOL(gadget_disable);
+
+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+    if(NULL == dev)
+    {
+        return -EINVAL;
+    }
+	struct usb_composite_dev *cdev = dev->cdev;
+    if(NULL == cdev)
+    {
+        return -EINVAL;
+    }
+    char *state = "DISCONNECTED";
+	unsigned long flags;
+
+	if (!cdev)
+		goto out;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		state = "CONFIGURED";
+	else if (dev->connected)
+		state = "CONNECTED";
+	spin_unlock_irqrestore(&cdev->lock, flags);
+out:
+	return sprintf(buf, "%s\n", state);
+}
+static ssize_t log_show(struct device *pdev, struct device_attribute *attr,
+			   char *buf)
+{
+	usb_dbg_showLog();
+	return sprintf(buf, "%s\n", "OK");
+}
+
+
+static ssize_t iSerial_enable_show(struct device *pdev, struct device_attribute *attr,
+			   char *buf)
+{
+	return sprintf(buf, "%d\n", strings_dev[STRING_SERIAL_IDX].id);
+}
+
+static u8 snID = 0;
+static ssize_t iSerial_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int enabled = 0;
+	struct android_dev *pdev = dev_get_drvdata(dev);
+    if(NULL == pdev)
+    {
+        return -EINVAL ;
+    }
+	struct usb_composite_dev *cdev = pdev->cdev;
+    if(NULL == cdev)
+    {
+        return -EINVAL;
+    }
+
+	sscanf(buf, "%d", &enabled);
+
+	if(enabled == 0){
+		strings_dev[STRING_SERIAL_IDX].id = 0;
+		device_desc.iSerialNumber = 0;
+		cdev->desc.iSerialNumber = 0;
+	}else{
+		strings_dev[STRING_SERIAL_IDX].id = snID;
+		device_desc.iSerialNumber = snID;
+		cdev->desc.iSerialNumber = snID;
+	}
+	return size;
+}
+
+
+#define DESCRIPTOR_ATTR(field, format_string)				\
+static ssize_t								\
+field ## _show(struct device *dev, struct device_attribute *attr,	\
+		char *buf)						\
+{									\
+	return sprintf(buf, format_string, device_desc.field);		\
+}									\
+static ssize_t								\
+field ## _store(struct device *dev, struct device_attribute *attr,	\
+		const char *buf, size_t size)				\
+{									\
+	int value;							\
+	if (sscanf(buf, format_string, &value) == 1) {			\
+		device_desc.field = value;				\
+		return size;						\
+	}								\
+	return -1;							\
+}									\
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+#define DESCRIPTOR_STRING_ATTR(field, buffer)				\
+static ssize_t								\
+field ## _show(struct device *dev, struct device_attribute *attr,	\
+		char *buf)						\
+{									\
+	return sprintf(buf, "%s", buffer);				\
+}									\
+static ssize_t								\
+field ## _store(struct device *dev, struct device_attribute *attr,	\
+		const char *buf, size_t size)				\
+{									\
+	if (size >= sizeof(buffer))					\
+		return -EINVAL;						\
+	return strlcpy(buffer, buf, sizeof(buffer));			\
+}									\
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+#define CONFIGURATION_ATTR(field, format_string)				\
+static ssize_t								\
+field ## _show(struct device *dev, struct device_attribute *attr, \
+			char *buf)						\
+{												\
+	return sprintf(buf, format_string, android_config_driver.field);		\
+}									\
+static ssize_t								\
+field ## _store(struct device *dev, struct device_attribute *attr,	\
+		const char *buf, size_t size)				\
+{									\
+	int value;							\
+	if (sscanf(buf, format_string, &value) == 1) {			\
+		android_config_driver.field = value;				\
+		return size;						\
+	}								\
+	return -1;							\
+}									\
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field## _show, field ## _store);
+
+#define CONFIGURATION_STRING_ATTR(field, buffer)				\
+static ssize_t								\
+field ## _show(struct device *dev, struct device_attribute *attr,	\
+		char *buf)						\
+{									\
+	return sprintf(buf, "%s\n", buffer);				\
+}									\
+static ssize_t								\
+field ## _store(struct device *dev, struct device_attribute *attr,	\
+		const char *buf, size_t size)				\
+{									\
+	if (size >= sizeof(buffer))					\
+		return -EINVAL;						\
+	return strlcpy(buffer, buf, sizeof(buffer));			\
+}									\
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field## _show, field ## _store);
+
+
+
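+/*
+ * sysfs show/store handlers for the device-descriptor and configuration
+ * fields exposed under /sys/class/android_usb/android%d/.
+ */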
+DESCRIPTOR_ATTR(idVendor, "%04x\n")
+DESCRIPTOR_ATTR(idProduct, "%04x\n")
+DESCRIPTOR_ATTR(bcdDevice, "%04x\n")
+DESCRIPTOR_ATTR(bDeviceClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceSubClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceProtocol, "%d\n")
+DESCRIPTOR_STRING_ATTR(iManufacturer, manufacturer_string)
+DESCRIPTOR_STRING_ATTR(iProduct, product_string)
+DESCRIPTOR_STRING_ATTR(iSerial, serial_string)
+
+CONFIGURATION_STRING_ATTR(iConfiguration, configuration_string)
+static DEVICE_ATTR(functions, S_IRUGO | S_IWUSR, functions_show,
+						 functions_store);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+
+//DESCRIPTOR_ATTR(bMaxPacketSize0, "%d\n")
+
+CONFIGURATION_ATTR(bmAttributes, "%d\n")
+	
+static DEVICE_ATTR(log, S_IRUGO, log_show, NULL);
+static DEVICE_ATTR(iSerial_enable, S_IRUGO | S_IWUSR, iSerial_enable_show, iSerial_enable_store);
+
+/*
+ * Report whether the device is suspended: 1 = suspended, 0 = running.
+ */
+static ssize_t suspend_state_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct android_dev *pdev = dev_get_drvdata(dev);
+	struct usb_composite_dev *cdev;
+
+	if (!pdev)
+		return -EINVAL;
+
+	cdev = pdev->cdev;
+	if (!cdev)
+		return -EINVAL;
+
+	return sprintf(buf, "%d\n", cdev->suspended);
+}
+static DEVICE_ATTR(suspend_state, S_IRUGO, suspend_state_show, NULL);
+
+/*
+ * Let userspace trigger USB remote wakeup of the host.  For this to work,
+ * the remote-wakeup attribute must be enabled in android_config_driver.
+ */
+static ssize_t usb_wakeup_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t n)
+{
+	struct android_dev *pdev = dev_get_drvdata(dev);
+	struct usb_composite_dev *cdev = NULL;
+
+	if (pdev)
+		cdev = pdev->cdev;
+	if (!cdev)
+		return n;
+
+	if (sysfs_streq(buf, "1"))
+		usb_gadget_wakeup(cdev->gadget);
+
+	return n;
+}
+static DEVICE_ATTR(wakeup, S_IWUSR, NULL, usb_wakeup_store);
+
+
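+/*
+ * usb_enum_mode: selects the USB enumeration mode (valid values 0-4, defined
+ * elsewhere).  Written as a hex value, read back in decimal.
+ */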
+static ssize_t usb_enum_mode_show(struct device *pdev, struct device_attribute *attr,
+			   char *buf)
+{
+	return sprintf(buf, "%d\n", usb_cur_enum_mode);		
+}
+static ssize_t usb_enum_mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned long enum_mode;
+
+	enum_mode = simple_strtoul(buf, NULL, 16);
+
+	if (enum_mode < 5)
+		usb_cur_enum_mode = enum_mode;
+	else
+		pr_err("invalid usb_enum_mode, keeping current mode %d\n",
+			usb_cur_enum_mode);
+	return size;
+}
+static DEVICE_ATTR(usb_enum_mode, S_IRUGO | S_IWUSR, usb_enum_mode_show, usb_enum_mode_store);
+
+
+static struct device_attribute *android_usb_attributes[] = {
+	&dev_attr_idVendor,
+	&dev_attr_idProduct,
+	&dev_attr_bcdDevice,
+	&dev_attr_bDeviceClass,
+	&dev_attr_bDeviceSubClass,
+	&dev_attr_bDeviceProtocol,
+	&dev_attr_iManufacturer,
+	&dev_attr_iProduct,
+	&dev_attr_iSerial,
+	&dev_attr_functions,
+	&dev_attr_enable,
+	&dev_attr_state,
+	&dev_attr_log,
+	&dev_attr_iConfiguration,
+	&dev_attr_iSerial_enable,
+	&dev_attr_suspend_state,
+	&dev_attr_wakeup,
+	&dev_attr_usb_enum_mode,
+	&dev_attr_bmAttributes,
+	NULL
+};
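+
+/*
+ * The attributes above are created on the "android0" device registered in
+ * android_create_device(), so userspace sees them as files under
+ * /sys/class/android_usb/android0/, e.g.:
+ *
+ *	echo 0 > /sys/class/android_usb/android0/iSerial_enable
+ *	cat /sys/class/android_usb/android0/suspend_state
+ *	echo 1 > /sys/class/android_usb/android0/wakeup
+ */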
+
+/*-------------------------------------------------------------------------*/
+/* Composite driver */
+
+static int android_bind_config(struct usb_configuration *c)
+{
+	struct android_dev *dev = _android_dev;
+	int ret = 0;
+	USBSTACK_DBG("%s", __func__);
+	ret = android_bind_enabled_functions(dev, c);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void android_unbind_config(struct usb_configuration *c)
+{
+	struct android_dev *dev = _android_dev;
+	USBSTACK_DBG("%s", __func__);
+	android_unbind_enabled_functions(dev, c);
+}
+
+static int android_bind(struct usb_composite_dev *cdev)
+{
+	struct android_dev *dev = _android_dev;
+	struct usb_gadget	*gadget = cdev->gadget;
+	int			gcnum, id, ret;
+	USBSTACK_DBG("%s", __func__);
+	/*
+	 * Start disconnected. Userspace will connect the gadget once
+	 * it is done configuring the functions.
+	 */
+	usb_gadget_disconnect(gadget);
+
+	ret = android_init_functions(dev->functions, cdev);
+	if (ret)
+		return ret;
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_MANUFACTURER_IDX].id = id;
+	device_desc.iManufacturer = id;
+
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_CONFIGURATION_IDX].id = id;
+	android_config_driver.iConfiguration = id;
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_PRODUCT_IDX].id = id;
+	device_desc.iProduct = id;
+
+	/* Default strings - should be updated by userspace */
+	strncpy(manufacturer_string, "DEMO,Incorporated", sizeof(manufacturer_string)-1);
+	strncpy(product_string, "DEMO Mobile Broadband", sizeof(product_string) - 1);
+	strncpy(serial_string, "0123456789ABCDEF", sizeof(serial_string) - 1);
+	strncpy(configuration_string, "DEMO Configuration", sizeof(configuration_string) - 1);
+
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_SERIAL_IDX].id = id;
+	device_desc.iSerialNumber = id;
+	snID = id;
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
+	else {
+		pr_warning("%s: controller '%s' not recognized\n",
+			longname, gadget->name);
+		device_desc.bcdDevice = __constant_cpu_to_le16(0x9999);
+	}
+
+	dev->cdev = cdev;
+
+	return 0;
+}
+
+static int android_usb_unbind(struct usb_composite_dev *cdev)
+{
+	struct android_dev *dev = _android_dev;
+	USBSTACK_DBG("%s", __func__);
+	cancel_work_sync(&dev->work);
+	android_cleanup_functions(dev->functions);
+	return 0;
+}
+
+static struct usb_composite_driver android_usb_driver = {
+	.name		= "android_usb",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.unbind		= android_usb_unbind,
+	.max_speed	= USB_SPEED_HIGH,
+};
+
+static int
+android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c)
+{
+	struct android_dev		*dev = _android_dev;
+	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
+	struct usb_request		*req;
+	struct android_usb_function	*f;
+	int value = -EOPNOTSUPP;
+	unsigned long flags;
+
+	if (!dev || !cdev)
+		return -EINVAL;
+	req = cdev->req;
+
+#if 1
+	USBSTACK_DBG("SETUP 0x%x,%x,v0x%x,w0x%x,l0x%x",
+		c->bRequestType,
+		c->bRequest,
+		c->wValue,
+		c->wIndex,
+		c->wLength);
+	USBSTACK_DBG("%s, %u", __func__, __LINE__);
+//	usb_dbg_ep0reg();
+#endif
+	req->zero = 0;
+	req->complete = composite_setup_complete;
+	req->length = 0;
+	gadget->ep0->driver_data = cdev;
+
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+		if (f->ctrlrequest) {
+			value = f->ctrlrequest(f, cdev, c);
+			if (value >= 0)
+				break;
+		}
+	}
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if((c->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR){
+	  	//USBSTACK_DBG("schedule_work vendor");
+		dev->vendor_req.bRequest = c->bRequest;
+		dev->vendor_req.wValue = c->wValue; 
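+		/* vendor request 0xA2 is a debug hook: force a kernel panic */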
+		if(dev->vendor_req.bRequest == 0xA2){
+			panic("now panic by user\n");
+		}
+		//if(usb_cur_enum_mode != USB_ENUM_MODE_USER)
+			schedule_work(&dev->usbmode);
+		//else
+		//	printk("user mode can not do mode switch\n");
+ 	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	if (((c->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) && (value == -1))
+		return value;
+	 
+	/* Special case the accessory function.
+	 * It needs to handle control requests before it is enabled.
+	 */
+#if 0
+	if (value < 0)
+		value = acc_ctrlrequest(cdev, c);
+#endif
+	if (value < 0)
+		value = composite_setup(gadget, c);
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (!dev->connected) {
+		dev->connected = 1;
+		schedule_work(&dev->work);
+	} else if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
+						cdev->config) {
+		schedule_work(&dev->work);
+	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	return value;
+}
+
+static void android_disconnect(struct usb_gadget *gadget)
+{
+	struct android_dev *dev = _android_dev;
+	struct usb_composite_dev *cdev = get_gadget_data(gadget);
+	unsigned long flags;
+	USBSTACK_DBG("%s", __func__);
+	composite_disconnect(gadget);
+	/* accessory HID support can be active while the
+	   accessory function is not actually enabled,
+	   so we need to inform it when we are disconnected.
+	 */
+#if 0
+	acc_disconnect();
+#endif
+	spin_lock_irqsave(&cdev->lock, flags);
+	dev->connected = 0;
+	schedule_work(&dev->work);
+	spin_unlock_irqrestore(&cdev->lock, flags);
+}
+
+static int android_create_device(struct android_dev *dev)
+{
+	struct device_attribute **attrs = android_usb_attributes;
+	struct device_attribute *attr;
+	int err;
+
+	dev->dev = device_create(android_class, NULL,
+					MKDEV(0, 0), NULL, "android0");
+	if (IS_ERR(dev->dev))
+		return PTR_ERR(dev->dev);
+
+	dev_set_drvdata(dev->dev, dev);
+
+	while ((attr = *attrs++)) {
+		err = device_create_file(dev->dev, attr);
+		if (err) {
+			device_destroy(android_class, dev->dev->devt);
+			return err;
+		}
+	}
+	return 0;
+}
+
+
+static int __init init(void)
+{
+	struct android_dev *dev;
+	int err;
+	USBSTACK_DBG("REGISTER USB STACK DRIVER BEGIN");
+	android_class = class_create(THIS_MODULE, "android_usb");
+	if (IS_ERR(android_class))
+		return PTR_ERR(android_class);
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->disable_depth = 1;
+	//dev->disable_depth = 0;
+	dev->functions = supported_functions;
+	INIT_LIST_HEAD(&dev->enabled_functions);
+	INIT_WORK(&dev->work, android_work);
+	INIT_WORK(&dev->usbmode, usb_mode_work);
+	USBSTACK_DBG("%s, %u", __func__, __LINE__);
+	mutex_init(&dev->mutex);
+
+	err = android_create_device(dev);
+	if (err) {
+		class_destroy(android_class);
+		kfree(dev);
+		return err;
+	}
+
+	_android_dev = dev;
+
+	/* Override composite driver functions */
+	composite_driver.setup = android_setup;
+	composite_driver.disconnect = android_disconnect;
+
+	err = usb_composite_probe(&android_usb_driver, android_bind);
+
+	/* report as bus-powered: drop the self-powered flag once bound */
+	if (!err && dev->cdev)
+		usb_gadget_clear_selfpowered(dev->cdev->gadget);
+	USBSTACK_DBG("REGISTER USB STACK DRIVER END");
+	return err;
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&android_usb_driver);
+	class_destroy(android_class);
+	kfree(_android_dev);
+	_android_dev = NULL;
+}
+module_exit(cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/at91_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/at91_udc.c
new file mode 100644
index 0000000..bf5671c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/at91_udc.c
@@ -0,0 +1,2025 @@
+/*
+ * at91_udc -- driver for at91-series USB peripheral controller
+ *
+ * Copyright (C) 2004 by Thomas Rathbone
+ * Copyright (C) 2005 by HP Labs
+ * Copyright (C) 2005 by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#undef	VERBOSE_DEBUG
+#undef	PACKET_TRACE
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/prefetch.h>
+#include <linux/clk.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+
+#include <asm/byteorder.h>
+#include <mach/hardware.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/gpio.h>
+
+#include <mach/board.h>
+#include <mach/cpu.h>
+#include <mach/at91sam9261_matrix.h>
+#include <mach/at91_matrix.h>
+
+#include "at91_udc.h"
+
+
+/*
+ * This controller is simple and PIO-only.  It's used in many AT91-series
+ * full speed USB controllers, including the at91rm9200 (arm920T, with MMU),
+ * at91sam926x (arm926ejs, with MMU), and several no-mmu versions.
+ *
+ * This driver expects the board has been wired with two GPIOs supporting
+ * a VBUS sensing IRQ, and a D+ pullup.  (They may be omitted, but the
+ * testing hasn't covered such cases.)
+ *
+ * The pullup is most important (so it's integrated on sam926x parts).  It
+ * provides software control over whether the host enumerates the device.
+ *
+ * The VBUS sensing helps during enumeration, and allows both USB clocks
+ * (and the transceiver) to stay gated off until they're necessary, saving
+ * power.  During USB suspend, the 48 MHz clock is gated off in hardware;
+ * it may also be gated off by software during some Linux sleep states.
+ */
+
+#define	DRIVER_VERSION	"3 May 2006"
+
+static const char driver_name [] = "at91_udc";
+static const char ep0name[] = "ep0";
+
+#define VBUS_POLL_TIMEOUT	msecs_to_jiffies(1000)
+
+#define at91_udp_read(udc, reg) \
+	__raw_readl((udc)->udp_baseaddr + (reg))
+#define at91_udp_write(udc, reg, val) \
+	__raw_writel((val), (udc)->udp_baseaddr + (reg))
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+#include <linux/seq_file.h>
+
+static const char debug_filename[] = "driver/udc";
+
+#define FOURBITS "%s%s%s%s"
+#define EIGHTBITS FOURBITS FOURBITS
+
+static void proc_ep_show(struct seq_file *s, struct at91_ep *ep)
+{
+	static char		*types[] = {
+		"control", "out-iso", "out-bulk", "out-int",
+		"BOGUS",   "in-iso",  "in-bulk",  "in-int"};
+
+	u32			csr;
+	struct at91_request	*req;
+	unsigned long	flags;
+	struct at91_udc	*udc = ep->udc;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	csr = __raw_readl(ep->creg);
+
+	/* NOTE:  not collecting per-endpoint irq statistics... */
+
+	seq_printf(s, "\n");
+	seq_printf(s, "%s, maxpacket %d %s%s %s%s\n",
+			ep->ep.name, ep->ep.maxpacket,
+			ep->is_in ? "in" : "out",
+			ep->is_iso ? " iso" : "",
+			ep->is_pingpong
+				? (ep->fifo_bank ? "pong" : "ping")
+				: "",
+			ep->stopped ? " stopped" : "");
+	seq_printf(s, "csr %08x rxbytes=%d %s %s %s" EIGHTBITS "\n",
+		csr,
+		(csr & 0x07ff0000) >> 16,
+		(csr & (1 << 15)) ? "enabled" : "disabled",
+		(csr & (1 << 11)) ? "DATA1" : "DATA0",
+		types[(csr & 0x700) >> 8],
+
+		/* iff type is control then print current direction */
+		(!(csr & 0x700))
+			? ((csr & (1 << 7)) ? " IN" : " OUT")
+			: "",
+		(csr & (1 << 6)) ? " rxdatabk1" : "",
+		(csr & (1 << 5)) ? " forcestall" : "",
+		(csr & (1 << 4)) ? " txpktrdy" : "",
+
+		(csr & (1 << 3)) ? " stallsent" : "",
+		(csr & (1 << 2)) ? " rxsetup" : "",
+		(csr & (1 << 1)) ? " rxdatabk0" : "",
+		(csr & (1 << 0)) ? " txcomp" : "");
+	if (list_empty (&ep->queue))
+		seq_printf(s, "\t(queue empty)\n");
+
+	else list_for_each_entry (req, &ep->queue, queue) {
+		unsigned	length = req->req.actual;
+
+		seq_printf(s, "\treq %p len %d/%d buf %p\n",
+				&req->req, length,
+				req->req.length, req->req.buf);
+	}
+	spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+static void proc_irq_show(struct seq_file *s, const char *label, u32 mask)
+{
+	int i;
+
+	seq_printf(s, "%s %04x:%s%s" FOURBITS, label, mask,
+		(mask & (1 << 13)) ? " wakeup" : "",
+		(mask & (1 << 12)) ? " endbusres" : "",
+
+		(mask & (1 << 11)) ? " sofint" : "",
+		(mask & (1 << 10)) ? " extrsm" : "",
+		(mask & (1 << 9)) ? " rxrsm" : "",
+		(mask & (1 << 8)) ? " rxsusp" : "");
+	for (i = 0; i < 8; i++) {
+		if (mask & (1 << i))
+			seq_printf(s, " ep%d", i);
+	}
+	seq_printf(s, "\n");
+}
+
+static int proc_udc_show(struct seq_file *s, void *unused)
+{
+	struct at91_udc	*udc = s->private;
+	struct at91_ep	*ep;
+	u32		tmp;
+
+	seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION);
+
+	seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n",
+		udc->vbus ? "present" : "off",
+		udc->enabled
+			? (udc->vbus ? "active" : "enabled")
+			: "disabled",
+		udc->selfpowered ? "self" : "VBUS",
+		udc->suspended ? ", suspended" : "",
+		udc->driver ? udc->driver->driver.name : "(none)");
+
+	/* don't access registers when interface isn't clocked */
+	if (!udc->clocked) {
+		seq_printf(s, "(not clocked)\n");
+		return 0;
+	}
+
+	tmp = at91_udp_read(udc, AT91_UDP_FRM_NUM);
+	seq_printf(s, "frame %05x:%s%s frame=%d\n", tmp,
+		(tmp & AT91_UDP_FRM_OK) ? " ok" : "",
+		(tmp & AT91_UDP_FRM_ERR) ? " err" : "",
+		(tmp & AT91_UDP_NUM));
+
+	tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
+	seq_printf(s, "glbstate %02x:%s" FOURBITS "\n", tmp,
+		(tmp & AT91_UDP_RMWUPE) ? " rmwupe" : "",
+		(tmp & AT91_UDP_RSMINPR) ? " rsminpr" : "",
+		(tmp & AT91_UDP_ESR) ? " esr" : "",
+		(tmp & AT91_UDP_CONFG) ? " confg" : "",
+		(tmp & AT91_UDP_FADDEN) ? " fadden" : "");
+
+	tmp = at91_udp_read(udc, AT91_UDP_FADDR);
+	seq_printf(s, "faddr   %03x:%s fadd=%d\n", tmp,
+		(tmp & AT91_UDP_FEN) ? " fen" : "",
+		(tmp & AT91_UDP_FADD));
+
+	proc_irq_show(s, "imr   ", at91_udp_read(udc, AT91_UDP_IMR));
+	proc_irq_show(s, "isr   ", at91_udp_read(udc, AT91_UDP_ISR));
+
+	if (udc->enabled && udc->vbus) {
+		proc_ep_show(s, &udc->ep[0]);
+		list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
+			if (ep->desc)
+				proc_ep_show(s, ep);
+		}
+	}
+	return 0;
+}
+
+static int proc_udc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_udc_show, PDE(inode)->data);
+}
+
+static const struct file_operations proc_ops = {
+	.owner		= THIS_MODULE,
+	.open		= proc_udc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void create_debug_file(struct at91_udc *udc)
+{
+	udc->pde = proc_create_data(debug_filename, 0, NULL, &proc_ops, udc);
+}
+
+static void remove_debug_file(struct at91_udc *udc)
+{
+	if (udc->pde)
+		remove_proc_entry(debug_filename, NULL);
+}
+
+#else
+
+static inline void create_debug_file(struct at91_udc *udc) {}
+static inline void remove_debug_file(struct at91_udc *udc) {}
+
+#endif
+
+
+/*-------------------------------------------------------------------------*/
+
+static void done(struct at91_ep *ep, struct at91_request *req, int status)
+{
+	unsigned	stopped = ep->stopped;
+	struct at91_udc	*udc = ep->udc;
+
+	list_del_init(&req->queue);
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+	if (status && status != -ESHUTDOWN)
+		VDBG("%s done %p, status %d\n", ep->ep.name, req, status);
+
+	ep->stopped = 1;
+	spin_unlock(&udc->lock);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&udc->lock);
+	ep->stopped = stopped;
+
+	/* ep0 is always ready; other endpoints need a non-empty queue */
+	if (list_empty(&ep->queue) && ep->int_mask != (1 << 0))
+		at91_udp_write(udc, AT91_UDP_IDR, ep->int_mask);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* bits indicating OUT fifo has data ready */
+#define	RX_DATA_READY	(AT91_UDP_RX_DATA_BK0 | AT91_UDP_RX_DATA_BK1)
+
+/*
+ * Endpoint FIFO CSR bits have a mix of bits, making it unsafe to just write
+ * back most of the value you just read (because of side effects, including
+ * bits that may change after reading and before writing).
+ *
+ * Except when changing a specific bit, always write values which:
+ *  - clear SET_FX bits (setting them could change something)
+ *  - set CLR_FX bits (clearing them could change something)
+ *
+ * There are also state bits like FORCESTALL, EPEDS, DIR, and EPTYPE
+ * that shouldn't normally be changed.
+ *
+ * NOTE at91sam9260 docs mention synch between UDPCK and MCK clock domains,
+ * implying a need to wait for one write to complete (test relevant bits)
+ * before starting the next write.  This shouldn't be an issue given how
+ * infrequently we write, except maybe for write-then-read idioms.
+ */
+#define	SET_FX	(AT91_UDP_TXPKTRDY)
+#define	CLR_FX	(RX_DATA_READY | AT91_UDP_RXSETUP \
+		| AT91_UDP_STALLSENT | AT91_UDP_TXCOMP)
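+
+/*
+ * For example, acknowledging a single event bit (as read_fifo() and
+ * handle_ep() do) looks like:
+ *
+ *	csr = __raw_readl(creg);
+ *	csr |= CLR_FX;				keep the other events pending
+ *	csr &= ~(SET_FX | <event to ack>);	never set TXPKTRDY by accident
+ *	__raw_writel(csr, creg);
+ */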
+
+/* pull OUT packet data from the endpoint's fifo */
+static int read_fifo (struct at91_ep *ep, struct at91_request *req)
+{
+	u32 __iomem	*creg = ep->creg;
+	u8 __iomem	*dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0));
+	u32		csr;
+	u8		*buf;
+	unsigned int	count, bufferspace, is_done;
+
+	buf = req->req.buf + req->req.actual;
+	bufferspace = req->req.length - req->req.actual;
+
+	/*
+	 * there might be nothing to read if ep_queue() calls us,
+	 * or if we already emptied both pingpong buffers
+	 */
+rescan:
+	csr = __raw_readl(creg);
+	if ((csr & RX_DATA_READY) == 0)
+		return 0;
+
+	count = (csr & AT91_UDP_RXBYTECNT) >> 16;
+	if (count > ep->ep.maxpacket)
+		count = ep->ep.maxpacket;
+	if (count > bufferspace) {
+		DBG("%s buffer overflow\n", ep->ep.name);
+		req->req.status = -EOVERFLOW;
+		count = bufferspace;
+	}
+	__raw_readsb(dreg, buf, count);
+
+	/* release and swap pingpong mem bank */
+	csr |= CLR_FX;
+	if (ep->is_pingpong) {
+		if (ep->fifo_bank == 0) {
+			csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK0);
+			ep->fifo_bank = 1;
+		} else {
+			csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK1);
+			ep->fifo_bank = 0;
+		}
+	} else
+		csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK0);
+	__raw_writel(csr, creg);
+
+	req->req.actual += count;
+	is_done = (count < ep->ep.maxpacket);
+	if (count == bufferspace)
+		is_done = 1;
+
+	PACKET("%s %p out/%d%s\n", ep->ep.name, &req->req, count,
+			is_done ? " (done)" : "");
+
+	/*
+	 * avoid extra trips through IRQ logic for packets already in
+	 * the fifo ... maybe preventing an extra (expensive) OUT-NAK
+	 */
+	if (is_done)
+		done(ep, req, 0);
+	else if (ep->is_pingpong) {
+		/*
+		 * One dummy read to delay the code because of a HW glitch:
+		 * CSR returns bad RXCOUNT when read too soon after updating
+		 * RX_DATA_BK flags.
+		 */
+		csr = __raw_readl(creg);
+
+		bufferspace -= count;
+		buf += count;
+		goto rescan;
+	}
+
+	return is_done;
+}
+
+/* load fifo for an IN packet */
+static int write_fifo(struct at91_ep *ep, struct at91_request *req)
+{
+	u32 __iomem	*creg = ep->creg;
+	u32		csr = __raw_readl(creg);
+	u8 __iomem	*dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0));
+	unsigned	total, count, is_last;
+	u8		*buf;
+
+	/*
+	 * TODO: allow for writing two packets to the fifo ... that'll
+	 * reduce the amount of IN-NAKing, but probably won't affect
+	 * throughput much.  (Unlike preventing OUT-NAKing!)
+	 */
+
+	/*
+	 * If ep_queue() calls us, the queue is empty and possibly in
+	 * odd states like TXCOMP not yet cleared (we do it, saving at
+	 * least one IRQ) or the fifo not yet being free.  Those aren't
+	 * issues normally (IRQ handler fast path).
+	 */
+	if (unlikely(csr & (AT91_UDP_TXCOMP | AT91_UDP_TXPKTRDY))) {
+		if (csr & AT91_UDP_TXCOMP) {
+			csr |= CLR_FX;
+			csr &= ~(SET_FX | AT91_UDP_TXCOMP);
+			__raw_writel(csr, creg);
+			csr = __raw_readl(creg);
+		}
+		if (csr & AT91_UDP_TXPKTRDY)
+			return 0;
+	}
+
+	buf = req->req.buf + req->req.actual;
+	prefetch(buf);
+	total = req->req.length - req->req.actual;
+	if (ep->ep.maxpacket < total) {
+		count = ep->ep.maxpacket;
+		is_last = 0;
+	} else {
+		count = total;
+		is_last = (count < ep->ep.maxpacket) || !req->req.zero;
+	}
+
+	/*
+	 * Write the packet, maybe it's a ZLP.
+	 *
+	 * NOTE:  incrementing req->actual before we receive the ACK means
+	 * gadget driver IN bytecounts can be wrong in fault cases.  That's
+	 * fixable with PIO drivers like this one (save "count" here, and
+	 * do the increment later on TX irq), but not for most DMA hardware.
+	 *
+	 * So all gadget drivers must accept that potential error.  Some
+	 * hardware supports precise fifo status reporting, letting them
+	 * recover when the actual bytecount matters (e.g. for USB Test
+	 * and Measurement Class devices).
+	 */
+	__raw_writesb(dreg, buf, count);
+	csr &= ~SET_FX;
+	csr |= CLR_FX | AT91_UDP_TXPKTRDY;
+	__raw_writel(csr, creg);
+	req->req.actual += count;
+
+	PACKET("%s %p in/%d%s\n", ep->ep.name, &req->req, count,
+			is_last ? " (done)" : "");
+	if (is_last)
+		done(ep, req, 0);
+	return is_last;
+}
+
+static void nuke(struct at91_ep *ep, int status)
+{
+	struct at91_request *req;
+
+	/* terminate any request in the queue */
+	ep->stopped = 1;
+	if (list_empty(&ep->queue))
+		return;
+
+	VDBG("%s %s\n", __func__, ep->ep.name);
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct at91_request, queue);
+		done(ep, req, status);
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int at91_ep_enable(struct usb_ep *_ep,
+				const struct usb_endpoint_descriptor *desc)
+{
+	struct at91_ep	*ep = container_of(_ep, struct at91_ep, ep);
+	struct at91_udc	*udc = ep->udc;
+	u16		maxpacket;
+	u32		tmp;
+	unsigned long	flags;
+
+	if (!_ep || !ep
+			|| !desc || ep->desc
+			|| _ep->name == ep0name
+			|| desc->bDescriptorType != USB_DT_ENDPOINT
+			|| (maxpacket = usb_endpoint_maxp(desc)) == 0
+			|| maxpacket > ep->maxpacket) {
+		DBG("bad ep or descriptor\n");
+		return -EINVAL;
+	}
+
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
+		DBG("bogus device state\n");
+		return -ESHUTDOWN;
+	}
+
+	tmp = usb_endpoint_type(desc);
+	switch (tmp) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		DBG("only one control endpoint\n");
+		return -EINVAL;
+	case USB_ENDPOINT_XFER_INT:
+		if (maxpacket > 64)
+			goto bogus_max;
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		switch (maxpacket) {
+		case 8:
+		case 16:
+		case 32:
+		case 64:
+			goto ok;
+		}
+bogus_max:
+		DBG("bogus maxpacket %d\n", maxpacket);
+		return -EINVAL;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (!ep->is_pingpong) {
+			DBG("iso requires double buffering\n");
+			return -EINVAL;
+		}
+		break;
+	}
+
+ok:
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* initialize endpoint to match this descriptor */
+	ep->is_in = usb_endpoint_dir_in(desc);
+	ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
+	ep->stopped = 0;
+	if (ep->is_in)
+		tmp |= 0x04;
+	tmp <<= 8;
+	tmp |= AT91_UDP_EPEDS;
+	__raw_writel(tmp, ep->creg);
+
+	ep->desc = desc;
+	ep->ep.maxpacket = maxpacket;
+
+	/*
+	 * reset/init endpoint fifo.  NOTE:  leaves fifo_bank alone,
+	 * since endpoint resets don't reset hw pingpong state.
+	 */
+	at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask);
+	at91_udp_write(udc, AT91_UDP_RST_EP, 0);
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+static int at91_ep_disable (struct usb_ep * _ep)
+{
+	struct at91_ep	*ep = container_of(_ep, struct at91_ep, ep);
+	struct at91_udc	*udc = ep->udc;
+	unsigned long	flags;
+
+	if (ep == &ep->udc->ep[0])
+		return -EINVAL;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	nuke(ep, -ESHUTDOWN);
+
+	/* restore the endpoint's pristine config */
+	ep->desc = NULL;
+	ep->ep.desc = NULL;
+	ep->ep.maxpacket = ep->maxpacket;
+
+	/* reset fifos and endpoint */
+	if (ep->udc->clocked) {
+		at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask);
+		at91_udp_write(udc, AT91_UDP_RST_EP, 0);
+		__raw_writel(0, ep->creg);
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+/*
+ * this is a PIO-only driver, so there's nothing
+ * interesting for request or buffer allocation.
+ */
+
+static struct usb_request *
+at91_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	struct at91_request *req;
+
+	req = kzalloc(sizeof (struct at91_request), gfp_flags);
+	if (!req)
+		return NULL;
+
+	INIT_LIST_HEAD(&req->queue);
+	return &req->req;
+}
+
+static void at91_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct at91_request *req;
+
+	req = container_of(_req, struct at91_request, req);
+	BUG_ON(!list_empty(&req->queue));
+	kfree(req);
+}
+
+static int at91_ep_queue(struct usb_ep *_ep,
+			struct usb_request *_req, gfp_t gfp_flags)
+{
+	struct at91_request	*req;
+	struct at91_ep		*ep;
+	struct at91_udc		*udc;
+	int			status;
+	unsigned long		flags;
+
+	req = container_of(_req, struct at91_request, req);
+	ep = container_of(_ep, struct at91_ep, ep);
+
+	if (!_req || !_req->complete
+			|| !_req->buf || !list_empty(&req->queue)) {
+		DBG("invalid request\n");
+		return -EINVAL;
+	}
+
+	if (!_ep || (!ep->desc && ep->ep.name != ep0name)) {
+		DBG("invalid ep\n");
+		return -EINVAL;
+	}
+
+	udc = ep->udc;
+
+	if (!udc || !udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
+		DBG("invalid device\n");
+		return -EINVAL;
+	}
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* try to kickstart any empty and idle queue */
+	if (list_empty(&ep->queue) && !ep->stopped) {
+		int	is_ep0;
+
+		/*
+		 * If this control request has a non-empty DATA stage, this
+		 * will start that stage.  It works just like a non-control
+		 * request (until the status stage starts, maybe early).
+		 *
+		 * If the data stage is empty, then this starts a successful
+		 * IN/STATUS stage.  (Unsuccessful ones use set_halt.)
+		 */
+		is_ep0 = (ep->ep.name == ep0name);
+		if (is_ep0) {
+			u32	tmp;
+
+			if (!udc->req_pending) {
+				status = -EINVAL;
+				goto done;
+			}
+
+			/*
+			 * defer changing CONFG until after the gadget driver
+			 * reconfigures the endpoints.
+			 */
+			if (udc->wait_for_config_ack) {
+				tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
+				tmp ^= AT91_UDP_CONFG;
+				VDBG("toggle config\n");
+				at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp);
+			}
+			if (req->req.length == 0) {
+ep0_in_status:
+				PACKET("ep0 in/status\n");
+				status = 0;
+				tmp = __raw_readl(ep->creg);
+				tmp &= ~SET_FX;
+				tmp |= CLR_FX | AT91_UDP_TXPKTRDY;
+				__raw_writel(tmp, ep->creg);
+				udc->req_pending = 0;
+				goto done;
+			}
+		}
+
+		if (ep->is_in)
+			status = write_fifo(ep, req);
+		else {
+			status = read_fifo(ep, req);
+
+			/* IN/STATUS stage is otherwise triggered by irq */
+			if (status && is_ep0)
+				goto ep0_in_status;
+		}
+	} else
+		status = 0;
+
+	if (req && !status) {
+		list_add_tail (&req->queue, &ep->queue);
+		at91_udp_write(udc, AT91_UDP_IER, ep->int_mask);
+	}
+done:
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return (status < 0) ? status : 0;
+}
+
+static int at91_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct at91_ep		*ep;
+	struct at91_request	*req;
+	unsigned long		flags;
+	struct at91_udc		*udc;
+
+	ep = container_of(_ep, struct at91_ep, ep);
+	if (!_ep || ep->ep.name == ep0name)
+		return -EINVAL;
+
+	udc = ep->udc;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry (req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		spin_unlock_irqrestore(&udc->lock, flags);
+		return -EINVAL;
+	}
+
+	done(ep, req, -ECONNRESET);
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+static int at91_ep_set_halt(struct usb_ep *_ep, int value)
+{
+	struct at91_ep	*ep = container_of(_ep, struct at91_ep, ep);
+	struct at91_udc	*udc = ep->udc;
+	u32 __iomem	*creg;
+	u32		csr;
+	unsigned long	flags;
+	int		status = 0;
+
+	if (!_ep || ep->is_iso || !ep->udc->clocked)
+		return -EINVAL;
+
+	creg = ep->creg;
+	spin_lock_irqsave(&udc->lock, flags);
+
+	csr = __raw_readl(creg);
+
+	/*
+	 * fail with still-busy IN endpoints, ensuring correct sequencing
+	 * of data tx then stall.  note that the fifo rx bytecount isn't
+	 * completely accurate as a tx bytecount.
+	 */
+	if (ep->is_in && (!list_empty(&ep->queue) || (csr >> 16) != 0))
+		status = -EAGAIN;
+	else {
+		csr |= CLR_FX;
+		csr &= ~SET_FX;
+		if (value) {
+			csr |= AT91_UDP_FORCESTALL;
+			VDBG("halt %s\n", ep->ep.name);
+		} else {
+			at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask);
+			at91_udp_write(udc, AT91_UDP_RST_EP, 0);
+			csr &= ~AT91_UDP_FORCESTALL;
+		}
+		__raw_writel(csr, creg);
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return status;
+}
+
+static const struct usb_ep_ops at91_ep_ops = {
+	.enable		= at91_ep_enable,
+	.disable	= at91_ep_disable,
+	.alloc_request	= at91_ep_alloc_request,
+	.free_request	= at91_ep_free_request,
+	.queue		= at91_ep_queue,
+	.dequeue	= at91_ep_dequeue,
+	.set_halt	= at91_ep_set_halt,
+	/* there's only imprecise fifo status reporting */
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int at91_get_frame(struct usb_gadget *gadget)
+{
+	struct at91_udc *udc = to_udc(gadget);
+
+	if (!to_udc(gadget)->clocked)
+		return -EINVAL;
+	return at91_udp_read(udc, AT91_UDP_FRM_NUM) & AT91_UDP_NUM;
+}
+
+static int at91_wakeup(struct usb_gadget *gadget)
+{
+	struct at91_udc	*udc = to_udc(gadget);
+	u32		glbstate;
+	int		status = -EINVAL;
+	unsigned long	flags;
+
+	DBG("%s\n", __func__ );
+	spin_lock_irqsave(&udc->lock, flags);
+
+	if (!udc->clocked || !udc->suspended)
+		goto done;
+
+	/* NOTE:  some "early versions" handle ESR differently ... */
+
+	glbstate = at91_udp_read(udc, AT91_UDP_GLB_STAT);
+	if (!(glbstate & AT91_UDP_ESR))
+		goto done;
+	glbstate |= AT91_UDP_ESR;
+	at91_udp_write(udc, AT91_UDP_GLB_STAT, glbstate);
+
+done:
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return status;
+}
+
+/* reinit == restore initial software state */
+static void udc_reinit(struct at91_udc *udc)
+{
+	u32 i;
+
+	INIT_LIST_HEAD(&udc->gadget.ep_list);
+	INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
+
+	for (i = 0; i < NUM_ENDPOINTS; i++) {
+		struct at91_ep *ep = &udc->ep[i];
+
+		if (i != 0)
+			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+		ep->desc = NULL;
+		ep->stopped = 0;
+		ep->fifo_bank = 0;
+		ep->ep.maxpacket = ep->maxpacket;
+		ep->creg = (void __iomem *) udc->udp_baseaddr + AT91_UDP_CSR(i);
+		/* initialize one queue per endpoint */
+		INIT_LIST_HEAD(&ep->queue);
+	}
+}
+
+static void stop_activity(struct at91_udc *udc)
+{
+	struct usb_gadget_driver *driver = udc->driver;
+	int i;
+
+	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
+		driver = NULL;
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	udc->suspended = 0;
+
+	for (i = 0; i < NUM_ENDPOINTS; i++) {
+		struct at91_ep *ep = &udc->ep[i];
+		ep->stopped = 1;
+		nuke(ep, -ESHUTDOWN);
+	}
+	if (driver) {
+		spin_unlock(&udc->lock);
+		driver->disconnect(&udc->gadget);
+		spin_lock(&udc->lock);
+	}
+
+	udc_reinit(udc);
+}
+
+static void clk_on(struct at91_udc *udc)
+{
+	if (udc->clocked)
+		return;
+	udc->clocked = 1;
+	clk_enable(udc->iclk);
+	clk_enable(udc->fclk);
+}
+
+static void clk_off(struct at91_udc *udc)
+{
+	if (!udc->clocked)
+		return;
+	udc->clocked = 0;
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	clk_disable(udc->fclk);
+	clk_disable(udc->iclk);
+}
+
+/*
+ * activate/deactivate link with host; minimize power usage for
+ * inactive links by cutting clocks and transceiver power.
+ */
+static void pullup(struct at91_udc *udc, int is_on)
+{
+	int	active = !udc->board.pullup_active_low;
+
+	if (!udc->enabled || !udc->vbus)
+		is_on = 0;
+	DBG("%sactive\n", is_on ? "" : "in");
+
+	if (is_on) {
+		clk_on(udc);
+		at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXRSM);
+		at91_udp_write(udc, AT91_UDP_TXVC, 0);
+		if (cpu_is_at91rm9200())
+			gpio_set_value(udc->board.pullup_pin, active);
+		else if (cpu_is_at91sam9260() || cpu_is_at91sam9263() || cpu_is_at91sam9g20()) {
+			u32	txvc = at91_udp_read(udc, AT91_UDP_TXVC);
+
+			txvc |= AT91_UDP_TXVC_PUON;
+			at91_udp_write(udc, AT91_UDP_TXVC, txvc);
+		} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
+			u32	usbpucr;
+
+			usbpucr = at91_matrix_read(AT91_MATRIX_USBPUCR);
+			usbpucr |= AT91_MATRIX_USBPUCR_PUON;
+			at91_matrix_write(AT91_MATRIX_USBPUCR, usbpucr);
+		}
+	} else {
+		stop_activity(udc);
+		at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXRSM);
+		at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS);
+		if (cpu_is_at91rm9200())
+			gpio_set_value(udc->board.pullup_pin, !active);
+		else if (cpu_is_at91sam9260() || cpu_is_at91sam9263() || cpu_is_at91sam9g20()) {
+			u32	txvc = at91_udp_read(udc, AT91_UDP_TXVC);
+
+			txvc &= ~AT91_UDP_TXVC_PUON;
+			at91_udp_write(udc, AT91_UDP_TXVC, txvc);
+		} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
+			u32	usbpucr;
+
+			usbpucr = at91_matrix_read(AT91_MATRIX_USBPUCR);
+			usbpucr &= ~AT91_MATRIX_USBPUCR_PUON;
+			at91_matrix_write(AT91_MATRIX_USBPUCR, usbpucr);
+		}
+		clk_off(udc);
+	}
+}
+
+/* vbus is here!  turn everything on that's ready */
+static int at91_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	struct at91_udc	*udc = to_udc(gadget);
+	unsigned long	flags;
+
+	/* VDBG("vbus %s\n", is_active ? "on" : "off"); */
+	spin_lock_irqsave(&udc->lock, flags);
+	udc->vbus = (is_active != 0);
+	if (udc->driver)
+		pullup(udc, is_active);
+	else
+		pullup(udc, 0);
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+static int at91_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct at91_udc	*udc = to_udc(gadget);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	udc->enabled = is_on = !!is_on;
+	pullup(udc, is_on);
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+static int at91_set_selfpowered(struct usb_gadget *gadget, int is_on)
+{
+	struct at91_udc	*udc = to_udc(gadget);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	udc->selfpowered = (is_on != 0);
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+static int at91_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *));
+static int at91_stop(struct usb_gadget_driver *driver);
+
+static const struct usb_gadget_ops at91_udc_ops = {
+	.get_frame		= at91_get_frame,
+	.wakeup			= at91_wakeup,
+	.set_selfpowered	= at91_set_selfpowered,
+	.vbus_session		= at91_vbus_session,
+	.pullup			= at91_pullup,
+	.start			= at91_start,
+	.stop			= at91_stop,
+
+	/*
+	 * VBUS-powered devices may also want to support bigger
+	 * power budgets after an appropriate SET_CONFIGURATION.
+	 */
+	/* .vbus_power		= at91_vbus_power, */
+};
+
+/*-------------------------------------------------------------------------*/
+
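+/*
+ * Service pending TX/RX work on one endpoint; returns nonzero once the
+ * request at the head of the queue has completed.
+ */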
+static int handle_ep(struct at91_ep *ep)
+{
+	struct at91_request	*req;
+	u32 __iomem		*creg = ep->creg;
+	u32			csr = __raw_readl(creg);
+
+	if (!list_empty(&ep->queue))
+		req = list_entry(ep->queue.next,
+			struct at91_request, queue);
+	else
+		req = NULL;
+
+	if (ep->is_in) {
+		if (csr & (AT91_UDP_STALLSENT | AT91_UDP_TXCOMP)) {
+			csr |= CLR_FX;
+			csr &= ~(SET_FX | AT91_UDP_STALLSENT | AT91_UDP_TXCOMP);
+			__raw_writel(csr, creg);
+		}
+		if (req)
+			return write_fifo(ep, req);
+
+	} else {
+		if (csr & AT91_UDP_STALLSENT) {
+			/* STALLSENT bit == ISOERR */
+			if (ep->is_iso && req)
+				req->req.status = -EILSEQ;
+			csr |= CLR_FX;
+			csr &= ~(SET_FX | AT91_UDP_STALLSENT);
+			__raw_writel(csr, creg);
+			csr = __raw_readl(creg);
+		}
+		if (req && (csr & RX_DATA_READY))
+			return read_fifo(ep, req);
+	}
+	return 0;
+}
+
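+/* an 8-byte SETUP packet, as raw bytes or as a struct usb_ctrlrequest */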
+union setup {
+	u8			raw[8];
+	struct usb_ctrlrequest	r;
+};
+
+static void handle_setup(struct at91_udc *udc, struct at91_ep *ep, u32 csr)
+{
+	u32 __iomem	*creg = ep->creg;
+	u8 __iomem	*dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0));
+	unsigned	rxcount, i = 0;
+	u32		tmp;
+	union setup	pkt;
+	int		status = 0;
+
+	/* read and ack SETUP; hard-fail for bogus packets */
+	rxcount = (csr & AT91_UDP_RXBYTECNT) >> 16;
+	if (likely(rxcount == 8)) {
+		while (rxcount--)
+			pkt.raw[i++] = __raw_readb(dreg);
+		if (pkt.r.bRequestType & USB_DIR_IN) {
+			csr |= AT91_UDP_DIR;
+			ep->is_in = 1;
+		} else {
+			csr &= ~AT91_UDP_DIR;
+			ep->is_in = 0;
+		}
+	} else {
+		/* REVISIT this happens sometimes under load; why?? */
+		ERR("SETUP len %d, csr %08x\n", rxcount, csr);
+		status = -EINVAL;
+	}
+	csr |= CLR_FX;
+	csr &= ~(SET_FX | AT91_UDP_RXSETUP);
+	__raw_writel(csr, creg);
+	udc->wait_for_addr_ack = 0;
+	udc->wait_for_config_ack = 0;
+	ep->stopped = 0;
+	if (unlikely(status != 0))
+		goto stall;
+
+#define w_index		le16_to_cpu(pkt.r.wIndex)
+#define w_value		le16_to_cpu(pkt.r.wValue)
+#define w_length	le16_to_cpu(pkt.r.wLength)
+
+	VDBG("SETUP %02x.%02x v%04x i%04x l%04x\n",
+			pkt.r.bRequestType, pkt.r.bRequest,
+			w_value, w_index, w_length);
+
+	/*
+	 * A few standard requests get handled here, ones that touch
+	 * hardware ... notably for device and endpoint features.
+	 */
+	udc->req_pending = 1;
+	csr = __raw_readl(creg);
+	csr |= CLR_FX;
+	csr &= ~SET_FX;
+	switch ((pkt.r.bRequestType << 8) | pkt.r.bRequest) {
+
+	case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
+			| USB_REQ_SET_ADDRESS:
+		__raw_writel(csr | AT91_UDP_TXPKTRDY, creg);
+		udc->addr = w_value;
+		udc->wait_for_addr_ack = 1;
+		udc->req_pending = 0;
+		/* FADDR is set later, when we ack host STATUS */
+		return;
+
+	case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
+			| USB_REQ_SET_CONFIGURATION:
+		tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT) & AT91_UDP_CONFG;
+		if (pkt.r.wValue)
+			udc->wait_for_config_ack = (tmp == 0);
+		else
+			udc->wait_for_config_ack = (tmp != 0);
+		if (udc->wait_for_config_ack)
+			VDBG("wait for config\n");
+		/* CONFG is toggled later, if gadget driver succeeds */
+		break;
+
+	/*
+	 * Hosts may set or clear remote wakeup status, and
+	 * devices may report they're VBUS powered.
+	 */
+	case ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
+			| USB_REQ_GET_STATUS:
+		tmp = (udc->selfpowered << USB_DEVICE_SELF_POWERED);
+		if (at91_udp_read(udc, AT91_UDP_GLB_STAT) & AT91_UDP_ESR)
+			tmp |= (1 << USB_DEVICE_REMOTE_WAKEUP);
+		PACKET("get device status\n");
+		__raw_writeb(tmp, dreg);
+		__raw_writeb(0, dreg);
+		goto write_in;
+		/* then STATUS starts later, automatically */
+	case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
+			| USB_REQ_SET_FEATURE:
+		if (w_value != USB_DEVICE_REMOTE_WAKEUP)
+			goto stall;
+		tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
+		tmp |= AT91_UDP_ESR;
+		at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp);
+		goto succeed;
+	case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
+			| USB_REQ_CLEAR_FEATURE:
+		if (w_value != USB_DEVICE_REMOTE_WAKEUP)
+			goto stall;
+		tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
+		tmp &= ~AT91_UDP_ESR;
+		at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp);
+		goto succeed;
+
+	/*
+	 * Interfaces have no feature settings; this is pretty useless.
+	 * we won't even insist the interface exists...
+	 */
+	case ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE) << 8)
+			| USB_REQ_GET_STATUS:
+		PACKET("get interface status\n");
+		__raw_writeb(0, dreg);
+		__raw_writeb(0, dreg);
+		goto write_in;
+		/* then STATUS starts later, automatically */
+	case ((USB_TYPE_STANDARD|USB_RECIP_INTERFACE) << 8)
+			| USB_REQ_SET_FEATURE:
+	case ((USB_TYPE_STANDARD|USB_RECIP_INTERFACE) << 8)
+			| USB_REQ_CLEAR_FEATURE:
+		goto stall;
+
+	/*
+	 * Hosts may clear bulk/intr endpoint halt after the gadget
+	 * driver sets it (not widely used); or set it (for testing)
+	 */
+	case ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT) << 8)
+			| USB_REQ_GET_STATUS:
+		tmp = w_index & USB_ENDPOINT_NUMBER_MASK;
+		ep = &udc->ep[tmp];
+		if (tmp >= NUM_ENDPOINTS || (tmp && !ep->desc))
+			goto stall;
+
+		if (tmp) {
+			if ((w_index & USB_DIR_IN)) {
+				if (!ep->is_in)
+					goto stall;
+			} else if (ep->is_in)
+				goto stall;
+		}
+		PACKET("get %s status\n", ep->ep.name);
+		if (__raw_readl(ep->creg) & AT91_UDP_FORCESTALL)
+			tmp = (1 << USB_ENDPOINT_HALT);
+		else
+			tmp = 0;
+		__raw_writeb(tmp, dreg);
+		__raw_writeb(0, dreg);
+		goto write_in;
+		/* then STATUS starts later, automatically */
+	case ((USB_TYPE_STANDARD|USB_RECIP_ENDPOINT) << 8)
+			| USB_REQ_SET_FEATURE:
+		tmp = w_index & USB_ENDPOINT_NUMBER_MASK;
+		ep = &udc->ep[tmp];
+		if (w_value != USB_ENDPOINT_HALT || tmp >= NUM_ENDPOINTS)
+			goto stall;
+		if (!ep->desc || ep->is_iso)
+			goto stall;
+		if ((w_index & USB_DIR_IN)) {
+			if (!ep->is_in)
+				goto stall;
+		} else if (ep->is_in)
+			goto stall;
+
+		tmp = __raw_readl(ep->creg);
+		tmp &= ~SET_FX;
+		tmp |= CLR_FX | AT91_UDP_FORCESTALL;
+		__raw_writel(tmp, ep->creg);
+		goto succeed;
+	case ((USB_TYPE_STANDARD|USB_RECIP_ENDPOINT) << 8)
+			| USB_REQ_CLEAR_FEATURE:
+		tmp = w_index & USB_ENDPOINT_NUMBER_MASK;
+		ep = &udc->ep[tmp];
+		if (w_value != USB_ENDPOINT_HALT || tmp >= NUM_ENDPOINTS)
+			goto stall;
+		if (tmp == 0)
+			goto succeed;
+		if (!ep->desc || ep->is_iso)
+			goto stall;
+		if ((w_index & USB_DIR_IN)) {
+			if (!ep->is_in)
+				goto stall;
+		} else if (ep->is_in)
+			goto stall;
+
+		at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask);
+		at91_udp_write(udc, AT91_UDP_RST_EP, 0);
+		tmp = __raw_readl(ep->creg);
+		tmp |= CLR_FX;
+		tmp &= ~(SET_FX | AT91_UDP_FORCESTALL);
+		__raw_writel(tmp, ep->creg);
+		if (!list_empty(&ep->queue))
+			handle_ep(ep);
+		goto succeed;
+	}
+
+#undef w_value
+#undef w_index
+#undef w_length
+
+	/* pass request up to the gadget driver */
+	if (udc->driver) {
+		spin_unlock(&udc->lock);
+		status = udc->driver->setup(&udc->gadget, &pkt.r);
+		spin_lock(&udc->lock);
+	}
+	else
+		status = -ENODEV;
+	if (status < 0) {
+stall:
+		VDBG("req %02x.%02x protocol STALL; stat %d\n",
+				pkt.r.bRequestType, pkt.r.bRequest, status);
+		csr |= AT91_UDP_FORCESTALL;
+		__raw_writel(csr, creg);
+		udc->req_pending = 0;
+	}
+	return;
+
+succeed:
+	/* immediate successful (IN) STATUS after zero length DATA */
+	PACKET("ep0 in/status\n");
+write_in:
+	csr |= AT91_UDP_TXPKTRDY;
+	__raw_writel(csr, creg);
+	udc->req_pending = 0;
+}
+
+static void handle_ep0(struct at91_udc *udc)
+{
+	struct at91_ep		*ep0 = &udc->ep[0];
+	u32 __iomem		*creg = ep0->creg;
+	u32			csr = __raw_readl(creg);
+	struct at91_request	*req;
+
+	if (unlikely(csr & AT91_UDP_STALLSENT)) {
+		nuke(ep0, -EPROTO);
+		udc->req_pending = 0;
+		csr |= CLR_FX;
+		csr &= ~(SET_FX | AT91_UDP_STALLSENT | AT91_UDP_FORCESTALL);
+		__raw_writel(csr, creg);
+		VDBG("ep0 stalled\n");
+		csr = __raw_readl(creg);
+	}
+	if (csr & AT91_UDP_RXSETUP) {
+		nuke(ep0, 0);
+		udc->req_pending = 0;
+		handle_setup(udc, ep0, csr);
+		return;
+	}
+
+	if (list_empty(&ep0->queue))
+		req = NULL;
+	else
+		req = list_entry(ep0->queue.next, struct at91_request, queue);
+
+	/* host ACKed an IN packet that we sent */
+	if (csr & AT91_UDP_TXCOMP) {
+		csr |= CLR_FX;
+		csr &= ~(SET_FX | AT91_UDP_TXCOMP);
+
+		/* write more IN DATA? */
+		if (req && ep0->is_in) {
+			if (handle_ep(ep0))
+				udc->req_pending = 0;
+
+		/*
+		 * Ack after:
+		 *  - last IN DATA packet (including GET_STATUS)
+		 *  - IN/STATUS for OUT DATA
+		 *  - IN/STATUS for any zero-length DATA stage
+		 * except for the IN DATA case, the host should send
+		 * an OUT status later, which we'll ack.
+		 */
+		} else {
+			udc->req_pending = 0;
+			__raw_writel(csr, creg);
+
+			/*
+			 * SET_ADDRESS takes effect only after the STATUS
+			 * (to the original address) gets acked.
+			 */
+			if (udc->wait_for_addr_ack) {
+				u32	tmp;
+
+				at91_udp_write(udc, AT91_UDP_FADDR,
+						AT91_UDP_FEN | udc->addr);
+				tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
+				tmp &= ~AT91_UDP_FADDEN;
+				if (udc->addr)
+					tmp |= AT91_UDP_FADDEN;
+				at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp);
+
+				udc->wait_for_addr_ack = 0;
+				VDBG("address %d\n", udc->addr);
+			}
+		}
+	}
+
+	/* OUT packet arrived ... */
+	else if (csr & AT91_UDP_RX_DATA_BK0) {
+		csr |= CLR_FX;
+		csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK0);
+
+		/* OUT DATA stage */
+		if (!ep0->is_in) {
+			if (req) {
+				if (handle_ep(ep0)) {
+					/* send IN/STATUS */
+					PACKET("ep0 in/status\n");
+					csr = __raw_readl(creg);
+					csr &= ~SET_FX;
+					csr |= CLR_FX | AT91_UDP_TXPKTRDY;
+					__raw_writel(csr, creg);
+					udc->req_pending = 0;
+				}
+			} else if (udc->req_pending) {
+				/*
+				 * AT91 hardware has a hard time with this
+				 * "deferred response" mode for control-OUT
+				 * transfers.  (For control-IN it's fine.)
+				 *
+				 * The normal solution leaves OUT data in the
+				 * fifo until the gadget driver is ready.
+				 * We couldn't do that here without disabling
+				 * the IRQ that tells about SETUP packets,
+				 * e.g. when the host gets impatient...
+				 *
+				 * Working around it by copying into a buffer
+				 * would almost be a non-deferred response,
+				 * except that it wouldn't permit reliable
+				 * stalling of the request.  Instead, demand
+				 * that gadget drivers not use this mode.
+				 */
+				DBG("no control-OUT deferred responses!\n");
+				__raw_writel(csr | AT91_UDP_FORCESTALL, creg);
+				udc->req_pending = 0;
+			}
+
+		/* STATUS stage for control-IN; ack.  */
+		} else {
+			PACKET("ep0 out/status ACK\n");
+			__raw_writel(csr, creg);
+
+			/* "early" status stage */
+			if (req)
+				done(ep0, req, 0);
+		}
+	}
+}
+
+static irqreturn_t at91_udc_irq (int irq, void *_udc)
+{
+	struct at91_udc		*udc = _udc;
+	u32			rescans = 5;
+	int			disable_clock = 0;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	if (!udc->clocked) {
+		clk_on(udc);
+		disable_clock = 1;
+	}
+
+	while (rescans--) {
+		u32 status;
+
+		status = at91_udp_read(udc, AT91_UDP_ISR)
+			& at91_udp_read(udc, AT91_UDP_IMR);
+		if (!status)
+			break;
+
+		/* USB reset irq:  not maskable */
+		if (status & AT91_UDP_ENDBUSRES) {
+			at91_udp_write(udc, AT91_UDP_IDR, ~MINIMUS_INTERRUPTUS);
+			at91_udp_write(udc, AT91_UDP_IER, MINIMUS_INTERRUPTUS);
+			/* Atmel code clears this irq twice */
+			at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_ENDBUSRES);
+			at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_ENDBUSRES);
+			VDBG("end bus reset\n");
+			udc->addr = 0;
+			stop_activity(udc);
+
+			/* enable ep0 */
+			at91_udp_write(udc, AT91_UDP_CSR(0),
+					AT91_UDP_EPEDS | AT91_UDP_EPTYPE_CTRL);
+			udc->gadget.speed = USB_SPEED_FULL;
+			udc->suspended = 0;
+			at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_EP(0));
+
+			/*
+			 * NOTE:  this driver keeps clocks off unless the
+			 * USB host is present.  That saves power, but for
+			 * boards that don't support VBUS detection, both
+			 * clocks need to be active most of the time.
+			 */
+
+		/* host initiated suspend (3+ms bus idle) */
+		} else if (status & AT91_UDP_RXSUSP) {
+			at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXSUSP);
+			at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_RXRSM);
+			at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXSUSP);
+			/* VDBG("bus suspend\n"); */
+			if (udc->suspended)
+				continue;
+			udc->suspended = 1;
+
+			/*
+			 * NOTE:  when suspending a VBUS-powered device, the
+			 * gadget driver should switch into slow clock mode
+			 * and then into standby to avoid drawing more than
+			 * 500uA power (2500uA for some high-power configs).
+			 */
+			if (udc->driver && udc->driver->suspend) {
+				spin_unlock(&udc->lock);
+				udc->driver->suspend(&udc->gadget);
+				spin_lock(&udc->lock);
+			}
+
+		/* host initiated resume */
+		} else if (status & AT91_UDP_RXRSM) {
+			at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXRSM);
+			at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_RXSUSP);
+			at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXRSM);
+			/* VDBG("bus resume\n"); */
+			if (!udc->suspended)
+				continue;
+			udc->suspended = 0;
+
+			/*
+			 * NOTE:  for a VBUS-powered device, the gadget driver
+			 * would normally want to switch out of slow clock
+			 * mode into normal mode.
+			 */
+			if (udc->driver && udc->driver->resume) {
+				spin_unlock(&udc->lock);
+				udc->driver->resume(&udc->gadget);
+				spin_lock(&udc->lock);
+			}
+
+		/* endpoint IRQs are cleared by handling them */
+		} else {
+			int		i;
+			unsigned	mask = 1;
+			struct at91_ep	*ep = &udc->ep[1];
+
+			if (status & mask)
+				handle_ep0(udc);
+			for (i = 1; i < NUM_ENDPOINTS; i++) {
+				mask <<= 1;
+				if (status & mask)
+					handle_ep(ep);
+				ep++;
+			}
+		}
+	}
+
+	if (disable_clock)
+		clk_off(udc);
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void nop_release(struct device *dev)
+{
+	/* nothing to free */
+}
+
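+/*
+ * Static controller/endpoint table with rm9200 FIFO sizes as defaults;
+ * at91udc_probe() raises the maxpacket values on chips with bigger FIFOs.
+ */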
+static struct at91_udc controller = {
+	.gadget = {
+		.ops	= &at91_udc_ops,
+		.ep0	= &controller.ep[0].ep,
+		.name	= driver_name,
+		.dev	= {
+			.init_name = "gadget",
+			.release = nop_release,
+		}
+	},
+	.ep[0] = {
+		.ep = {
+			.name	= ep0name,
+			.ops	= &at91_ep_ops,
+		},
+		.udc		= &controller,
+		.maxpacket	= 8,
+		.int_mask	= 1 << 0,
+	},
+	.ep[1] = {
+		.ep = {
+			.name	= "ep1",
+			.ops	= &at91_ep_ops,
+		},
+		.udc		= &controller,
+		.is_pingpong	= 1,
+		.maxpacket	= 64,
+		.int_mask	= 1 << 1,
+	},
+	.ep[2] = {
+		.ep = {
+			.name	= "ep2",
+			.ops	= &at91_ep_ops,
+		},
+		.udc		= &controller,
+		.is_pingpong	= 1,
+		.maxpacket	= 64,
+		.int_mask	= 1 << 2,
+	},
+	.ep[3] = {
+		.ep = {
+			/* could actually do bulk too */
+			.name	= "ep3-int",
+			.ops	= &at91_ep_ops,
+		},
+		.udc		= &controller,
+		.maxpacket	= 8,
+		.int_mask	= 1 << 3,
+	},
+	.ep[4] = {
+		.ep = {
+			.name	= "ep4",
+			.ops	= &at91_ep_ops,
+		},
+		.udc		= &controller,
+		.is_pingpong	= 1,
+		.maxpacket	= 256,
+		.int_mask	= 1 << 4,
+	},
+	.ep[5] = {
+		.ep = {
+			.name	= "ep5",
+			.ops	= &at91_ep_ops,
+		},
+		.udc		= &controller,
+		.is_pingpong	= 1,
+		.maxpacket	= 256,
+		.int_mask	= 1 << 5,
+	},
+	/* ep6 and ep7 are also reserved (custom silicon might use them) */
+};
+
+static void at91_vbus_update(struct at91_udc *udc, unsigned value)
+{
+	value ^= udc->board.vbus_active_low;
+	if (value != udc->vbus)
+		at91_vbus_session(&udc->gadget, value);
+}
+
+static irqreturn_t at91_vbus_irq(int irq, void *_udc)
+{
+	struct at91_udc	*udc = _udc;
+
+	/* vbus needs at least brief debouncing */
+	udelay(10);
+	at91_vbus_update(udc, gpio_get_value(udc->board.vbus_pin));
+
+	return IRQ_HANDLED;
+}
+
+static void at91_vbus_timer_work(struct work_struct *work)
+{
+	struct at91_udc *udc = container_of(work, struct at91_udc,
+					    vbus_timer_work);
+
+	at91_vbus_update(udc, gpio_get_value_cansleep(udc->board.vbus_pin));
+
+	if (!timer_pending(&udc->vbus_timer))
+		mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT);
+}
+
+static void at91_vbus_timer(unsigned long data)
+{
+	struct at91_udc *udc = (struct at91_udc *)data;
+
+	/*
+	 * If we are polling vbus it is likely that the gpio is on a
+	 * bus such as i2c or spi which may sleep, so schedule some work
+	 * to read the vbus gpio
+	 */
+	if (!work_pending(&udc->vbus_timer_work))
+		schedule_work(&udc->vbus_timer_work);
+}
+
+static int at91_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct at91_udc	*udc = &controller;
+	int		retval;
+	unsigned long	flags;
+
+	if (!driver
+			|| driver->max_speed < USB_SPEED_FULL
+			|| !bind
+			|| !driver->setup) {
+		DBG("bad parameter.\n");
+		return -EINVAL;
+	}
+
+	if (udc->driver) {
+		DBG("UDC already has a gadget driver\n");
+		return -EBUSY;
+	}
+
+	udc->driver = driver;
+	udc->gadget.dev.driver = &driver->driver;
+	dev_set_drvdata(&udc->gadget.dev, &driver->driver);
+	udc->enabled = 1;
+	udc->selfpowered = 1;
+
+	retval = bind(&udc->gadget);
+	if (retval) {
+		DBG("bind() returned %d\n", retval);
+		udc->driver = NULL;
+		udc->gadget.dev.driver = NULL;
+		dev_set_drvdata(&udc->gadget.dev, NULL);
+		udc->enabled = 0;
+		udc->selfpowered = 0;
+		return retval;
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+	pullup(udc, 1);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	DBG("bound to %s\n", driver->driver.name);
+	return 0;
+}
+
+static int at91_stop(struct usb_gadget_driver *driver)
+{
+	struct at91_udc *udc = &controller;
+	unsigned long	flags;
+
+	if (!driver || driver != udc->driver || !driver->unbind)
+		return -EINVAL;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	udc->enabled = 0;
+	at91_udp_write(udc, AT91_UDP_IDR, ~0);
+	pullup(udc, 0);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	driver->unbind(&udc->gadget);
+	udc->gadget.dev.driver = NULL;
+	dev_set_drvdata(&udc->gadget.dev, NULL);
+	udc->driver = NULL;
+
+	DBG("unbound from %s\n", driver->driver.name);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void at91udc_shutdown(struct platform_device *dev)
+{
+	struct at91_udc *udc = platform_get_drvdata(dev);
+	unsigned long	flags;
+
+	/* force disconnect on reboot */
+	spin_lock_irqsave(&udc->lock, flags);
+	pullup(platform_get_drvdata(dev), 0);
+	spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+static void __devinit at91udc_of_init(struct at91_udc *udc,
+				     struct device_node *np)
+{
+	struct at91_udc_data *board = &udc->board;
+	u32 val;
+	enum of_gpio_flags flags;
+
+	if (of_property_read_u32(np, "atmel,vbus-polled", &val) == 0)
+		board->vbus_polled = 1;
+
+	board->vbus_pin = of_get_named_gpio_flags(np, "atmel,vbus-gpio", 0,
+						  &flags);
+	board->vbus_active_low = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0;
+
+	board->pullup_pin = of_get_named_gpio_flags(np, "atmel,pullup-gpio", 0,
+						  &flags);
+
+	board->pullup_active_low = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0;
+}
+
+static int __devinit at91udc_probe(struct platform_device *pdev)
+{
+	struct device	*dev = &pdev->dev;
+	struct at91_udc	*udc;
+	int		retval;
+	struct resource	*res;
+
+	if (!dev->platform_data && !pdev->dev.of_node) {
+		/* small (so we copy it) but critical! */
+		DBG("missing platform_data\n");
+		return -ENODEV;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENXIO;
+
+	if (!request_mem_region(res->start, resource_size(res), driver_name)) {
+		DBG("someone's using UDC memory\n");
+		return -EBUSY;
+	}
+
+	/* init software state */
+	udc = &controller;
+	udc->gadget.dev.parent = dev;
+	if (pdev->dev.of_node)
+		at91udc_of_init(udc, pdev->dev.of_node);
+	else
+		memcpy(&udc->board, dev->platform_data,
+		       sizeof(struct at91_udc_data));
+	udc->pdev = pdev;
+	udc->enabled = 0;
+	spin_lock_init(&udc->lock);
+
+	/* rm9200 needs manual D+ pullup; off by default */
+	if (cpu_is_at91rm9200()) {
+		if (!gpio_is_valid(udc->board.pullup_pin)) {
+			DBG("no D+ pullup?\n");
+			retval = -ENODEV;
+			goto fail0;
+		}
+		retval = gpio_request(udc->board.pullup_pin, "udc_pullup");
+		if (retval) {
+			DBG("D+ pullup is busy\n");
+			goto fail0;
+		}
+		gpio_direction_output(udc->board.pullup_pin,
+				udc->board.pullup_active_low);
+	}
+
+	/* newer chips have more FIFO memory than rm9200 */
+	if (cpu_is_at91sam9260() || cpu_is_at91sam9g20()) {
+		udc->ep[0].maxpacket = 64;
+		udc->ep[3].maxpacket = 64;
+		udc->ep[4].maxpacket = 512;
+		udc->ep[5].maxpacket = 512;
+	} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
+		udc->ep[3].maxpacket = 64;
+	} else if (cpu_is_at91sam9263()) {
+		udc->ep[0].maxpacket = 64;
+		udc->ep[3].maxpacket = 64;
+	}
+
+	udc->udp_baseaddr = ioremap(res->start, resource_size(res));
+	if (!udc->udp_baseaddr) {
+		retval = -ENOMEM;
+		goto fail0a;
+	}
+
+	udc_reinit(udc);
+
+	/* get interface and function clocks */
+	udc->iclk = clk_get(dev, "udc_clk");
+	udc->fclk = clk_get(dev, "udpck");
+	if (IS_ERR(udc->iclk) || IS_ERR(udc->fclk)) {
+		DBG("clocks missing\n");
+		retval = -ENODEV;
+		/* NOTE: we "know" here that refcounts on these are NOPs */
+		goto fail0b;
+	}
+
+	retval = device_register(&udc->gadget.dev);
+	if (retval < 0) {
+		put_device(&udc->gadget.dev);
+		goto fail0b;
+	}
+
+	/* don't do anything until we have both gadget driver and VBUS */
+	clk_enable(udc->iclk);
+	at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS);
+	at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff);
+	/* Clear all pending interrupts - UDP may be used by bootloader. */
+	at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff);
+	clk_disable(udc->iclk);
+
+	/* request UDC and maybe VBUS irqs */
+	udc->udp_irq = platform_get_irq(pdev, 0);
+	retval = request_irq(udc->udp_irq, at91_udc_irq,
+			0, driver_name, udc);
+	if (retval < 0) {
+		DBG("request irq %d failed\n", udc->udp_irq);
+		goto fail1;
+	}
+	if (gpio_is_valid(udc->board.vbus_pin)) {
+		retval = gpio_request(udc->board.vbus_pin, "udc_vbus");
+		if (retval < 0) {
+			DBG("request vbus pin failed\n");
+			goto fail2;
+		}
+		gpio_direction_input(udc->board.vbus_pin);
+
+		/*
+		 * Get the initial state of VBUS - we cannot expect
+		 * a pending interrupt.
+		 */
+		udc->vbus = gpio_get_value_cansleep(udc->board.vbus_pin) ^
+			udc->board.vbus_active_low;
+
+		if (udc->board.vbus_polled) {
+			INIT_WORK(&udc->vbus_timer_work, at91_vbus_timer_work);
+			setup_timer(&udc->vbus_timer, at91_vbus_timer,
+				    (unsigned long)udc);
+			mod_timer(&udc->vbus_timer,
+				  jiffies + VBUS_POLL_TIMEOUT);
+		} else {
+			if (request_irq(gpio_to_irq(udc->board.vbus_pin),
+					at91_vbus_irq, 0, driver_name, udc)) {
+				DBG("request vbus irq %d failed\n",
+				    udc->board.vbus_pin);
+				retval = -EBUSY;
+				goto fail3;
+			}
+		}
+	} else {
+		DBG("no VBUS detection, assuming always-on\n");
+		udc->vbus = 1;
+	}
+	retval = usb_add_gadget_udc(dev, &udc->gadget);
+	if (retval)
+		goto fail4;
+	dev_set_drvdata(dev, udc);
+	device_init_wakeup(dev, 1);
+	create_debug_file(udc);
+
+	INFO("%s version %s\n", driver_name, DRIVER_VERSION);
+	return 0;
+fail4:
+	if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled)
+		free_irq(gpio_to_irq(udc->board.vbus_pin), udc);
+fail3:
+	if (gpio_is_valid(udc->board.vbus_pin))
+		gpio_free(udc->board.vbus_pin);
+fail2:
+	free_irq(udc->udp_irq, udc);
+fail1:
+	device_unregister(&udc->gadget.dev);
+fail0b:
+	iounmap(udc->udp_baseaddr);
+fail0a:
+	if (cpu_is_at91rm9200())
+		gpio_free(udc->board.pullup_pin);
+fail0:
+	release_mem_region(res->start, resource_size(res));
+	DBG("%s probe failed, %d\n", driver_name, retval);
+	return retval;
+}
+
+static int __exit at91udc_remove(struct platform_device *pdev)
+{
+	struct at91_udc *udc = platform_get_drvdata(pdev);
+	struct resource *res;
+	unsigned long	flags;
+
+	DBG("remove\n");
+
+	usb_del_gadget_udc(&udc->gadget);
+	if (udc->driver)
+		return -EBUSY;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	pullup(udc, 0);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	device_init_wakeup(&pdev->dev, 0);
+	remove_debug_file(udc);
+	if (gpio_is_valid(udc->board.vbus_pin)) {
+		free_irq(gpio_to_irq(udc->board.vbus_pin), udc);
+		gpio_free(udc->board.vbus_pin);
+	}
+	free_irq(udc->udp_irq, udc);
+	device_unregister(&udc->gadget.dev);
+
+	iounmap(udc->udp_baseaddr);
+
+	if (cpu_is_at91rm9200())
+		gpio_free(udc->board.pullup_pin);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+
+	clk_put(udc->iclk);
+	clk_put(udc->fclk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int at91udc_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	struct at91_udc *udc = platform_get_drvdata(pdev);
+	int		wake = udc->driver && device_may_wakeup(&pdev->dev);
+	unsigned long	flags;
+
+	/* Unless we can act normally to the host (letting it wake us up
+	 * whenever it has work for us) force disconnect.  Wakeup requires
+	 * PLLB for USB events (signaling for reset, wakeup, or incoming
+	 * tokens) and VBUS irqs (on systems which support them).
+	 */
+	if ((!udc->suspended && udc->addr)
+			|| !wake
+			|| at91_suspend_entering_slow_clock()) {
+		spin_lock_irqsave(&udc->lock, flags);
+		pullup(udc, 0);
+		wake = 0;
+		spin_unlock_irqrestore(&udc->lock, flags);
+	} else
+		enable_irq_wake(udc->udp_irq);
+
+	udc->active_suspend = wake;
+	if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled && wake)
+		enable_irq_wake(gpio_to_irq(udc->board.vbus_pin));
+	return 0;
+}
+
+static int at91udc_resume(struct platform_device *pdev)
+{
+	struct at91_udc *udc = platform_get_drvdata(pdev);
+	unsigned long	flags;
+
+	if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled &&
+	    udc->active_suspend)
+		disable_irq_wake(gpio_to_irq(udc->board.vbus_pin));
+
+	/* maybe reconnect to host; if so, clocks on */
+	if (udc->active_suspend)
+		disable_irq_wake(udc->udp_irq);
+	else {
+		spin_lock_irqsave(&udc->lock, flags);
+		pullup(udc, 1);
+		spin_unlock_irqrestore(&udc->lock, flags);
+	}
+	return 0;
+}
+#else
+#define	at91udc_suspend	NULL
+#define	at91udc_resume	NULL
+#endif
+
+#if defined(CONFIG_OF)
+static const struct of_device_id at91_udc_dt_ids[] = {
+	{ .compatible = "atmel,at91rm9200-udc" },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, at91_udc_dt_ids);
+#endif
+
+static struct platform_driver at91_udc_driver = {
+	.remove		= __exit_p(at91udc_remove),
+	.shutdown	= at91udc_shutdown,
+	.suspend	= at91udc_suspend,
+	.resume		= at91udc_resume,
+	.driver		= {
+		.name	= (char *) driver_name,
+		.owner	= THIS_MODULE,
+		.of_match_table	= of_match_ptr(at91_udc_dt_ids),
+	},
+};
+
+static int __init udc_init_module(void)
+{
+	return platform_driver_probe(&at91_udc_driver, at91udc_probe);
+}
+module_init(udc_init_module);
+
+static void __exit udc_exit_module(void)
+{
+	platform_driver_unregister(&at91_udc_driver);
+}
+module_exit(udc_exit_module);
+
+MODULE_DESCRIPTION("AT91 udc driver");
+MODULE_AUTHOR("Thomas Rathbone, David Brownell");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:at91_udc");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/at91_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/at91_udc.h
new file mode 100644
index 0000000..3c0315b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/at91_udc.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2004 by Thomas Rathbone, HP Labs
+ * Copyright (C) 2005 by Ivan Kokshaysky
+ * Copyright (C) 2006 by SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91_UDC_H
+#define AT91_UDC_H
+
+/*
+ * USB Device Port (UDP) registers.
+ * Based on AT91RM9200 datasheet revision E.
+ */
+
+#define AT91_UDP_FRM_NUM	0x00		/* Frame Number Register */
+#define     AT91_UDP_NUM	(0x7ff <<  0)	/* Frame Number */
+#define     AT91_UDP_FRM_ERR	(1     << 16)	/* Frame Error */
+#define     AT91_UDP_FRM_OK	(1     << 17)	/* Frame OK */
+
+#define AT91_UDP_GLB_STAT	0x04		/* Global State Register */
+#define     AT91_UDP_FADDEN	(1 <<  0)	/* Function Address Enable */
+#define     AT91_UDP_CONFG	(1 <<  1)	/* Configured */
+#define     AT91_UDP_ESR	(1 <<  2)	/* Enable Send Resume */
+#define     AT91_UDP_RSMINPR	(1 <<  3)	/* Resume has been sent */
+#define     AT91_UDP_RMWUPE	(1 <<  4)	/* Remote Wake Up Enable */
+
+#define AT91_UDP_FADDR		0x08		/* Function Address Register */
+#define     AT91_UDP_FADD	(0x7f << 0)	/* Function Address Value */
+#define     AT91_UDP_FEN	(1    << 8)	/* Function Enable */
+
+#define AT91_UDP_IER		0x10		/* Interrupt Enable Register */
+#define AT91_UDP_IDR		0x14		/* Interrupt Disable Register */
+#define AT91_UDP_IMR		0x18		/* Interrupt Mask Register */
+
+#define AT91_UDP_ISR		0x1c		/* Interrupt Status Register */
+#define     AT91_UDP_EP(n)	(1 << (n))	/* Endpoint Interrupt Status */
+#define     AT91_UDP_RXSUSP	(1 <<  8) 	/* USB Suspend Interrupt Status */
+#define     AT91_UDP_RXRSM	(1 <<  9)	/* USB Resume Interrupt Status */
+#define     AT91_UDP_EXTRSM	(1 << 10)	/* External Resume Interrupt Status [AT91RM9200 only] */
+#define     AT91_UDP_SOFINT	(1 << 11)	/* Start of Frame Interrupt Status */
+#define     AT91_UDP_ENDBUSRES	(1 << 12)	/* End of Bus Reset Interrupt Status */
+#define     AT91_UDP_WAKEUP	(1 << 13)	/* USB Wakeup Interrupt Status [AT91RM9200 only] */
+
+#define AT91_UDP_ICR		0x20		/* Interrupt Clear Register */
+#define AT91_UDP_RST_EP		0x28		/* Reset Endpoint Register */
+
+#define AT91_UDP_CSR(n)		(0x30+((n)*4))	/* Endpoint Control/Status Registers 0-7 */
+#define     AT91_UDP_TXCOMP	(1 <<  0)	/* Generates IN packet with data previously written in DPR */
+#define     AT91_UDP_RX_DATA_BK0 (1 <<  1)	/* Receive Data Bank 0 */
+#define     AT91_UDP_RXSETUP	(1 <<  2)	/* Received SETUP packet */
+#define     AT91_UDP_STALLSENT	(1 <<  3)	/* Stall Sent / Isochronous error (Isochronous endpoints) */
+#define     AT91_UDP_TXPKTRDY	(1 <<  4)	/* Transmit Packet Ready */
+#define     AT91_UDP_FORCESTALL	(1 <<  5)	/* Force Stall */
+#define     AT91_UDP_RX_DATA_BK1 (1 <<  6)	/* Receive Data Bank 1 */
+#define     AT91_UDP_DIR	(1 <<  7)	/* Transfer Direction */
+#define     AT91_UDP_EPTYPE	(7 <<  8)	/* Endpoint Type */
+#define		AT91_UDP_EPTYPE_CTRL		(0 <<  8)
+#define		AT91_UDP_EPTYPE_ISO_OUT		(1 <<  8)
+#define		AT91_UDP_EPTYPE_BULK_OUT	(2 <<  8)
+#define		AT91_UDP_EPTYPE_INT_OUT		(3 <<  8)
+#define		AT91_UDP_EPTYPE_ISO_IN		(5 <<  8)
+#define		AT91_UDP_EPTYPE_BULK_IN		(6 <<  8)
+#define		AT91_UDP_EPTYPE_INT_IN		(7 <<  8)
+#define     AT91_UDP_DTGLE	(1 << 11)	/* Data Toggle */
+#define     AT91_UDP_EPEDS	(1 << 15)	/* Endpoint Enable/Disable */
+#define     AT91_UDP_RXBYTECNT	(0x7ff << 16)	/* Number of bytes in FIFO */
+
+#define AT91_UDP_FDR(n)		(0x50+((n)*4))	/* Endpoint FIFO Data Registers 0-7 */
+
+#define AT91_UDP_TXVC		0x74		/* Transceiver Control Register */
+#define     AT91_UDP_TXVC_TXVDIS (1 << 8)	/* Transceiver Disable */
+#define     AT91_UDP_TXVC_PUON   (1 << 9)	/* PullUp On [AT91SAM9260 only] */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * controller driver data structures
+ */
+
+#define	NUM_ENDPOINTS	6
+
+/*
+ * hardware won't disable bus reset, or resume while the controller
+ * is suspended ... watching suspend helps keep the logic symmetric.
+ */
+#define	MINIMUS_INTERRUPTUS \
+	(AT91_UDP_ENDBUSRES | AT91_UDP_RXRSM | AT91_UDP_RXSUSP)
+
+struct at91_ep {
+	struct usb_ep			ep;
+	struct list_head		queue;
+	struct at91_udc			*udc;
+	void __iomem			*creg;
+
+	unsigned			maxpacket:16;
+	u8				int_mask;
+	unsigned			is_pingpong:1;
+
+	unsigned			stopped:1;
+	unsigned			is_in:1;
+	unsigned			is_iso:1;
+	unsigned			fifo_bank:1;
+
+	const struct usb_endpoint_descriptor
+					*desc;
+};
+
+/*
+ * driver is non-SMP, and just blocks IRQs whenever it needs
+ * access protection for chip registers or driver state
+ */
+struct at91_udc {
+	struct usb_gadget		gadget;
+	struct at91_ep			ep[NUM_ENDPOINTS];
+	struct usb_gadget_driver	*driver;
+	unsigned			vbus:1;
+	unsigned			enabled:1;
+	unsigned			clocked:1;
+	unsigned			suspended:1;
+	unsigned			req_pending:1;
+	unsigned			wait_for_addr_ack:1;
+	unsigned			wait_for_config_ack:1;
+	unsigned			selfpowered:1;
+	unsigned			active_suspend:1;
+	u8				addr;
+	struct at91_udc_data		board;
+	struct clk			*iclk, *fclk;
+	struct platform_device		*pdev;
+	struct proc_dir_entry		*pde;
+	void __iomem			*udp_baseaddr;
+	int				udp_irq;
+	spinlock_t			lock;
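+	/* used only when board.vbus_polled is set; the timer defers the
+	 * possibly-sleeping vbus gpio read to vbus_timer_work */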
+	struct timer_list		vbus_timer;
+	struct work_struct		vbus_timer_work;
+};
+
+static inline struct at91_udc *to_udc(struct usb_gadget *g)
+{
+	return container_of(g, struct at91_udc, gadget);
+}
+
+struct at91_request {
+	struct usb_request		req;
+	struct list_head		queue;
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef VERBOSE_DEBUG
+#    define VDBG		DBG
+#else
+#    define VDBG(stuff...)	do{}while(0)
+#endif
+
+#ifdef PACKET_TRACE
+#    define PACKET		VDBG
+#else
+#    define PACKET(stuff...)	do{}while(0)
+#endif
+
+#define ERR(stuff...)		pr_err("udc: " stuff)
+#define WARNING(stuff...)	pr_warning("udc: " stuff)
+#define INFO(stuff...)		pr_info("udc: " stuff)
+#define DBG(stuff...)		pr_debug("udc: " stuff)
+
+#endif
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/atmel_usba_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/atmel_usba_udc.c
new file mode 100644
index 0000000..9f98508
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/atmel_usba_udc.c
@@ -0,0 +1,2118 @@
+/*
+ * Driver for the Atmel USBA high speed USB device controller
+ *
+ * Copyright (C) 2005-2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/atmel_usba_udc.h>
+#include <linux/delay.h>
+
+#include <asm/gpio.h>
+#include <mach/board.h>
+
+#include "atmel_usba_udc.h"
+
+
+static struct usba_udc the_udc;
+static struct usba_ep *usba_ep;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
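+/*
+ * Snapshot the endpoint queue under the lock; queue_dbg_read() consumes
+ * (and frees) the copied requests, and queue_dbg_release() drops whatever
+ * the reader did not consume.
+ */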
+static int queue_dbg_open(struct inode *inode, struct file *file)
+{
+	struct usba_ep *ep = inode->i_private;
+	struct usba_request *req, *req_copy;
+	struct list_head *queue_data;
+
+	queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL);
+	if (!queue_data)
+		return -ENOMEM;
+	INIT_LIST_HEAD(queue_data);
+
+	spin_lock_irq(&ep->udc->lock);
+	list_for_each_entry(req, &ep->queue, queue) {
+		req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC);
+		if (!req_copy)
+			goto fail;
+		list_add_tail(&req_copy->queue, queue_data);
+	}
+	spin_unlock_irq(&ep->udc->lock);
+
+	file->private_data = queue_data;
+	return 0;
+
+fail:
+	spin_unlock_irq(&ep->udc->lock);
+	list_for_each_entry_safe(req, req_copy, queue_data, queue) {
+		list_del(&req->queue);
+		kfree(req);
+	}
+	kfree(queue_data);
+	return -ENOMEM;
+}
+
+/*
+ * bbbbbbbb llllllll IZS sssss FDL\n\0
+ *
+ * b: buffer address
+ * l: buffer length
+ * I/i: interrupt/no interrupt
+ * Z/z: zero/no zero
+ * S/s: short ok/short not ok
+ * s: status
+ * F/f: submitted/not submitted to FIFO
+ * D/d: using/not using DMA
+ * L/l: last transaction/not last transaction
+ */
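+/*
+ * For example, a 512 byte FIFO request that is still in progress could be
+ * reported as (illustrative values only):
+ *
+ *	c0a81200 00000200 IzS  -115 Fdl
+ */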
+static ssize_t queue_dbg_read(struct file *file, char __user *buf,
+		size_t nbytes, loff_t *ppos)
+{
+	struct list_head *queue = file->private_data;
+	struct usba_request *req, *tmp_req;
+	size_t len, remaining, actual = 0;
+	char tmpbuf[38];
+
+	if (!access_ok(VERIFY_WRITE, buf, nbytes))
+		return -EFAULT;
+
+	mutex_lock(&file->f_dentry->d_inode->i_mutex);
+	list_for_each_entry_safe(req, tmp_req, queue, queue) {
+		len = snprintf(tmpbuf, sizeof(tmpbuf),
+				"%8p %08x %c%c%c %5d %c%c%c\n",
+				req->req.buf, req->req.length,
+				req->req.no_interrupt ? 'i' : 'I',
+				req->req.zero ? 'Z' : 'z',
+				req->req.short_not_ok ? 's' : 'S',
+				req->req.status,
+				req->submitted ? 'F' : 'f',
+				req->using_dma ? 'D' : 'd',
+				req->last_transaction ? 'L' : 'l');
+		len = min(len, sizeof(tmpbuf));
+		if (len > nbytes)
+			break;
+
+		list_del(&req->queue);
+		kfree(req);
+
+		remaining = __copy_to_user(buf, tmpbuf, len);
+		actual += len - remaining;
+		if (remaining)
+			break;
+
+		nbytes -= len;
+		buf += len;
+	}
+	mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+
+	return actual;
+}
+
+static int queue_dbg_release(struct inode *inode, struct file *file)
+{
+	struct list_head *queue_data = file->private_data;
+	struct usba_request *req, *tmp_req;
+
+	list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
+		list_del(&req->queue);
+		kfree(req);
+	}
+	kfree(queue_data);
+	return 0;
+}
+
+static int regs_dbg_open(struct inode *inode, struct file *file)
+{
+	struct usba_udc *udc;
+	unsigned int i;
+	u32 *data;
+	int ret = -ENOMEM;
+
+	mutex_lock(&inode->i_mutex);
+	udc = inode->i_private;
+	data = kmalloc(inode->i_size, GFP_KERNEL);
+	if (!data)
+		goto out;
+
+	spin_lock_irq(&udc->lock);
+	for (i = 0; i < inode->i_size / 4; i++)
+		data[i] = __raw_readl(udc->regs + i * 4);
+	spin_unlock_irq(&udc->lock);
+
+	file->private_data = data;
+	ret = 0;
+
+out:
+	mutex_unlock(&inode->i_mutex);
+
+	return ret;
+}
+
+static ssize_t regs_dbg_read(struct file *file, char __user *buf,
+		size_t nbytes, loff_t *ppos)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	int ret;
+
+	mutex_lock(&inode->i_mutex);
+	ret = simple_read_from_buffer(buf, nbytes, ppos,
+			file->private_data,
+			file->f_dentry->d_inode->i_size);
+	mutex_unlock(&inode->i_mutex);
+
+	return ret;
+}
+
+static int regs_dbg_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+const struct file_operations queue_dbg_fops = {
+	.owner		= THIS_MODULE,
+	.open		= queue_dbg_open,
+	.llseek		= no_llseek,
+	.read		= queue_dbg_read,
+	.release	= queue_dbg_release,
+};
+
+const struct file_operations regs_dbg_fops = {
+	.owner		= THIS_MODULE,
+	.open		= regs_dbg_open,
+	.llseek		= generic_file_llseek,
+	.read		= regs_dbg_read,
+	.release	= regs_dbg_release,
+};
+
+static void usba_ep_init_debugfs(struct usba_udc *udc,
+		struct usba_ep *ep)
+{
+	struct dentry *ep_root;
+
+	ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root);
+	if (!ep_root)
+		goto err_root;
+	ep->debugfs_dir = ep_root;
+
+	ep->debugfs_queue = debugfs_create_file("queue", 0400, ep_root,
+						ep, &queue_dbg_fops);
+	if (!ep->debugfs_queue)
+		goto err_queue;
+
+	if (ep->can_dma) {
+		ep->debugfs_dma_status
+			= debugfs_create_u32("dma_status", 0400, ep_root,
+					&ep->last_dma_status);
+		if (!ep->debugfs_dma_status)
+			goto err_dma_status;
+	}
+	if (ep_is_control(ep)) {
+		ep->debugfs_state
+			= debugfs_create_u32("state", 0400, ep_root,
+					&ep->state);
+		if (!ep->debugfs_state)
+			goto err_state;
+	}
+
+	return;
+
+err_state:
+	if (ep->can_dma)
+		debugfs_remove(ep->debugfs_dma_status);
+err_dma_status:
+	debugfs_remove(ep->debugfs_queue);
+err_queue:
+	debugfs_remove(ep_root);
+err_root:
+	dev_err(&ep->udc->pdev->dev,
+		"failed to create debugfs directory for %s\n", ep->ep.name);
+}
+
+static void usba_ep_cleanup_debugfs(struct usba_ep *ep)
+{
+	debugfs_remove(ep->debugfs_queue);
+	debugfs_remove(ep->debugfs_dma_status);
+	debugfs_remove(ep->debugfs_state);
+	debugfs_remove(ep->debugfs_dir);
+	ep->debugfs_dma_status = NULL;
+	ep->debugfs_dir = NULL;
+}
+
+static void usba_init_debugfs(struct usba_udc *udc)
+{
+	struct dentry *root, *regs;
+	struct resource *regs_resource;
+
+	root = debugfs_create_dir(udc->gadget.name, NULL);
+	if (IS_ERR(root) || !root)
+		goto err_root;
+	udc->debugfs_root = root;
+
+	regs = debugfs_create_file("regs", 0400, root, udc, &regs_dbg_fops);
+	if (!regs)
+		goto err_regs;
+
+	regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
+				CTRL_IOMEM_ID);
+	regs->d_inode->i_size = resource_size(regs_resource);
+	udc->debugfs_regs = regs;
+
+	usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0));
+
+	return;
+
+err_regs:
+	debugfs_remove(root);
+err_root:
+	udc->debugfs_root = NULL;
+	dev_err(&udc->pdev->dev, "debugfs is not available\n");
+}
+
+static void usba_cleanup_debugfs(struct usba_udc *udc)
+{
+	usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0));
+	debugfs_remove(udc->debugfs_regs);
+	debugfs_remove(udc->debugfs_root);
+	udc->debugfs_regs = NULL;
+	udc->debugfs_root = NULL;
+}
+#else
+static inline void usba_ep_init_debugfs(struct usba_udc *udc,
+					 struct usba_ep *ep)
+{
+
+}
+
+static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep)
+{
+
+}
+
+static inline void usba_init_debugfs(struct usba_udc *udc)
+{
+
+}
+
+static inline void usba_cleanup_debugfs(struct usba_udc *udc)
+{
+
+}
+#endif
+
+static int vbus_is_present(struct usba_udc *udc)
+{
+	if (gpio_is_valid(udc->vbus_pin))
+		return gpio_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted;
+
+	/* No Vbus detection: Assume always present */
+	return 1;
+}
+
+#if defined(CONFIG_ARCH_AT91SAM9RL)
+
+#include <mach/at91_pmc.h>
+
+static void toggle_bias(int is_on)
+{
+	unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
+
+	if (is_on)
+		at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
+	else
+		at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
+}
+
+#else
+
+static void toggle_bias(int is_on)
+{
+}
+
+#endif /* CONFIG_ARCH_AT91SAM9RL */
+
+static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
+{
+	unsigned int transaction_len;
+
+	transaction_len = req->req.length - req->req.actual;
+	req->last_transaction = 1;
+	if (transaction_len > ep->ep.maxpacket) {
+		transaction_len = ep->ep.maxpacket;
+		req->last_transaction = 0;
+	} else if (transaction_len == ep->ep.maxpacket && req->req.zero)
+		req->last_transaction = 0;
+
+	DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
+		ep->ep.name, req, transaction_len,
+		req->last_transaction ? ", done" : "");
+
+	memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len);
+	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
+	req->req.actual += transaction_len;
+}
+
+static void submit_request(struct usba_ep *ep, struct usba_request *req)
+{
+	DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n",
+		ep->ep.name, req, req->req.length);
+
+	req->req.actual = 0;
+	req->submitted = 1;
+
+	if (req->using_dma) {
+		if (req->req.length == 0) {
+			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
+			return;
+		}
+
+		if (req->req.zero)
+			usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET);
+		else
+			usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET);
+
+		usba_dma_writel(ep, ADDRESS, req->req.dma);
+		usba_dma_writel(ep, CONTROL, req->ctrl);
+	} else {
+		next_fifo_transaction(ep, req);
+		if (req->last_transaction) {
+			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
+			usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
+		} else {
+			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
+		}
+	}
+}
+
+static void submit_next_request(struct usba_ep *ep)
+{
+	struct usba_request *req;
+
+	if (list_empty(&ep->queue)) {
+		usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY);
+		return;
+	}
+
+	req = list_entry(ep->queue.next, struct usba_request, queue);
+	if (!req->submitted)
+		submit_request(ep, req);
+}
+
+static void send_status(struct usba_udc *udc, struct usba_ep *ep)
+{
+	ep->state = STATUS_STAGE_IN;
+	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
+	usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
+}
+
+static void receive_data(struct usba_ep *ep)
+{
+	struct usba_udc *udc = ep->udc;
+	struct usba_request *req;
+	unsigned long status;
+	unsigned int bytecount, nr_busy;
+	int is_complete = 0;
+
+	status = usba_ep_readl(ep, STA);
+	nr_busy = USBA_BFEXT(BUSY_BANKS, status);
+
+	DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);
+
+	while (nr_busy > 0) {
+		if (list_empty(&ep->queue)) {
+			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
+			break;
+		}
+		req = list_entry(ep->queue.next,
+				 struct usba_request, queue);
+
+		bytecount = USBA_BFEXT(BYTE_COUNT, status);
+
+		if (status & (1 << 31))
+			is_complete = 1;
+		if (req->req.actual + bytecount >= req->req.length) {
+			is_complete = 1;
+			bytecount = req->req.length - req->req.actual;
+		}
+
+		memcpy_fromio(req->req.buf + req->req.actual,
+				ep->fifo, bytecount);
+		req->req.actual += bytecount;
+
+		usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
+
+		if (is_complete) {
+			DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name);
+			req->req.status = 0;
+			list_del_init(&req->queue);
+			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
+			spin_unlock(&udc->lock);
+			req->req.complete(&ep->ep, &req->req);
+			spin_lock(&udc->lock);
+		}
+
+		status = usba_ep_readl(ep, STA);
+		nr_busy = USBA_BFEXT(BUSY_BANKS, status);
+
+		if (is_complete && ep_is_control(ep)) {
+			send_status(udc, ep);
+			break;
+		}
+	}
+}
+
+static void
+request_complete(struct usba_ep *ep, struct usba_request *req, int status)
+{
+	struct usba_udc *udc = ep->udc;
+
+	WARN_ON(!list_empty(&req->queue));
+
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+
+	if (req->mapped) {
+		dma_unmap_single(
+			&udc->pdev->dev, req->req.dma, req->req.length,
+			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+	}
+
+	DBG(DBG_GADGET | DBG_REQ,
+		"%s: req %p complete: status %d, actual %u\n",
+		ep->ep.name, req, req->req.status, req->req.actual);
+
+	spin_unlock(&udc->lock);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&udc->lock);
+}
+
+static void
+request_complete_list(struct usba_ep *ep, struct list_head *list, int status)
+{
+	struct usba_request *req, *tmp_req;
+
+	list_for_each_entry_safe(req, tmp_req, list, queue) {
+		list_del_init(&req->queue);
+		request_complete(ep, req, status);
+	}
+}
+
+static int
+usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+	struct usba_ep *ep = to_usba_ep(_ep);
+	struct usba_udc *udc = ep->udc;
+	unsigned long flags, ept_cfg, maxpacket;
+	unsigned int nr_trans;
+
+	DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
+
+	maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
+
+	if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
+			|| ep->index == 0
+			|| desc->bDescriptorType != USB_DT_ENDPOINT
+			|| maxpacket == 0
+			|| maxpacket > ep->fifo_size) {
+		DBG(DBG_ERR, "ep_enable: Invalid argument");
+		return -EINVAL;
+	}
+
+	ep->is_isoc = 0;
+	ep->is_in = 0;
+
+	if (maxpacket <= 8)
+		ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
+	else
+		/* LSB is bit 1, not 0 */
+		ept_cfg = USBA_BF(EPT_SIZE, fls(maxpacket - 1) - 3);
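+	/* e.g. maxpacket 64 gives fls(63) - 3 = 3, maxpacket 512 gives 6 */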
+
+	DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
+			ep->ep.name, ept_cfg, maxpacket);
+
+	if (usb_endpoint_dir_in(desc)) {
+		ep->is_in = 1;
+		ept_cfg |= USBA_EPT_DIR_IN;
+	}
+
+	switch (usb_endpoint_type(desc)) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
+		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (!ep->can_isoc) {
+			DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
+					ep->ep.name);
+			return -EINVAL;
+		}
+
+		/*
+		 * Bits 11:12 specify number of _additional_
+		 * transactions per microframe.
+		 */
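+		/*
+		 * e.g. wMaxPacketSize 0x0c00 encodes a 1024 byte payload
+		 * (bits 10:0) plus one additional transaction (bits 12:11),
+		 * so nr_trans below becomes 2.
+		 */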
+		nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1;
+		if (nr_trans > 3)
+			return -EINVAL;
+
+		ep->is_isoc = 1;
+		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);
+
+		/*
+		 * Do triple-buffering on high-bandwidth iso endpoints.
+		 */
+		if (nr_trans > 1 && ep->nr_banks == 3)
+			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_TRIPLE);
+		else
+			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
+		ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
+		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
+		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
+		break;
+	}
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+
+	if (ep->desc) {
+		spin_unlock_irqrestore(&ep->udc->lock, flags);
+		DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
+		return -EBUSY;
+	}
+
+	ep->desc = desc;
+	ep->ep.maxpacket = maxpacket;
+
+	usba_ep_writel(ep, CFG, ept_cfg);
+	usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
+
+	if (ep->can_dma) {
+		u32 ctrl;
+
+		usba_writel(udc, INT_ENB,
+				(usba_readl(udc, INT_ENB)
+					| USBA_BF(EPT_INT, 1 << ep->index)
+					| USBA_BF(DMA_INT, 1 << ep->index)));
+		ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
+		usba_ep_writel(ep, CTL_ENB, ctrl);
+	} else {
+		usba_writel(udc, INT_ENB,
+				(usba_readl(udc, INT_ENB)
+					| USBA_BF(EPT_INT, 1 << ep->index)));
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
+			(unsigned long)usba_ep_readl(ep, CFG));
+	DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
+			(unsigned long)usba_readl(udc, INT_ENB));
+
+	return 0;
+}
+
+static int usba_ep_disable(struct usb_ep *_ep)
+{
+	struct usba_ep *ep = to_usba_ep(_ep);
+	struct usba_udc *udc = ep->udc;
+	LIST_HEAD(req_list);
+	unsigned long flags;
+
+	DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	if (!ep->desc) {
+		spin_unlock_irqrestore(&udc->lock, flags);
+		/* REVISIT because this driver disables endpoints in
+		 * reset_all_endpoints() before calling disconnect(),
+		 * most gadget drivers would trigger this non-error ...
+		 */
+		if (udc->gadget.speed != USB_SPEED_UNKNOWN)
+			DBG(DBG_ERR, "ep_disable: %s not enabled\n",
+					ep->ep.name);
+		return -EINVAL;
+	}
+	ep->desc = NULL;
+	ep->ep.desc = NULL;
+
+	list_splice_init(&ep->queue, &req_list);
+	if (ep->can_dma) {
+		usba_dma_writel(ep, CONTROL, 0);
+		usba_dma_writel(ep, ADDRESS, 0);
+		usba_dma_readl(ep, STATUS);
+	}
+	usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
+	usba_writel(udc, INT_ENB,
+			usba_readl(udc, INT_ENB)
+			& ~USBA_BF(EPT_INT, 1 << ep->index));
+
+	request_complete_list(ep, &req_list, -ESHUTDOWN);
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
+static struct usb_request *
+usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	struct usba_request *req;
+
+	DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);
+
+	req = kzalloc(sizeof(*req), gfp_flags);
+	if (!req)
+		return NULL;
+
+	INIT_LIST_HEAD(&req->queue);
+	req->req.dma = DMA_ADDR_INVALID;
+
+	return &req->req;
+}
+
+static void
+usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct usba_request *req = to_usba_req(_req);
+
+	DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);
+
+	kfree(req);
+}
+
+static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
+		struct usba_request *req, gfp_t gfp_flags)
+{
+	unsigned long flags;
+	int ret;
+
+	DBG(DBG_DMA, "%s: req l/%u d/%08x %c%c%c\n",
+		ep->ep.name, req->req.length, req->req.dma,
+		req->req.zero ? 'Z' : 'z',
+		req->req.short_not_ok ? 'S' : 's',
+		req->req.no_interrupt ? 'I' : 'i');
+
+	if (req->req.length > 0x10000) {
+		/* Lengths from 0 to 65536 (inclusive) are supported */
+		DBG(DBG_ERR, "invalid request length %u\n", req->req.length);
+		return -EINVAL;
+	}
+
+	req->using_dma = 1;
+
+	if (req->req.dma == DMA_ADDR_INVALID) {
+		req->req.dma = dma_map_single(
+			&udc->pdev->dev, req->req.buf, req->req.length,
+			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		req->mapped = 1;
+	} else {
+		dma_sync_single_for_device(
+			&udc->pdev->dev, req->req.dma, req->req.length,
+			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		req->mapped = 0;
+	}
+
+	req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
+			| USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
+			| USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
+
+	if (ep->is_in)
+		req->ctrl |= USBA_DMA_END_BUF_EN;
+
+	/*
+	 * Add this request to the queue and submit for DMA if
+	 * possible. Check if we're still alive first -- we may have
+	 * received a reset since last time we checked.
+	 */
+	ret = -ESHUTDOWN;
+	spin_lock_irqsave(&udc->lock, flags);
+	if (ep->desc) {
+		if (list_empty(&ep->queue))
+			submit_request(ep, req);
+
+		list_add_tail(&req->queue, &ep->queue);
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return ret;
+}
+
+static int
+usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+	struct usba_request *req = to_usba_req(_req);
+	struct usba_ep *ep = to_usba_ep(_ep);
+	struct usba_udc *udc = ep->udc;
+	unsigned long flags;
+	int ret;
+
+	DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
+			ep->ep.name, req, _req->length);
+
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN || !ep->desc)
+		return -ESHUTDOWN;
+
+	req->submitted = 0;
+	req->using_dma = 0;
+	req->last_transaction = 0;
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	if (ep->can_dma)
+		return queue_dma(udc, ep, req, gfp_flags);
+
+	/* May have received a reset since last time we checked */
+	ret = -ESHUTDOWN;
+	spin_lock_irqsave(&udc->lock, flags);
+	if (ep->desc) {
+		list_add_tail(&req->queue, &ep->queue);
+
+		if ((!ep_is_control(ep) && ep->is_in) ||
+			(ep_is_control(ep)
+				&& (ep->state == DATA_STAGE_IN
+					|| ep->state == STATUS_STAGE_IN)))
+			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
+		else
+			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return ret;
+}
+
+static void
+usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status)
+{
+	req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status);
+}
+
+static int stop_dma(struct usba_ep *ep, u32 *pstatus)
+{
+	unsigned int timeout;
+	u32 status;
+
+	/*
+	 * Stop the DMA controller. When writing both CH_EN
+	 * and LINK to 0, the other bits are not affected.
+	 */
+	usba_dma_writel(ep, CONTROL, 0);
+
+	/* Wait for the FIFO to empty */
+	for (timeout = 40; timeout; --timeout) {
+		status = usba_dma_readl(ep, STATUS);
+		if (!(status & USBA_DMA_CH_EN))
+			break;
+		udelay(1);
+	}
+
+	if (pstatus)
+		*pstatus = status;
+
+	if (timeout == 0) {
+		dev_err(&ep->udc->pdev->dev,
+			"%s: timed out waiting for DMA FIFO to empty\n",
+			ep->ep.name);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct usba_ep *ep = to_usba_ep(_ep);
+	struct usba_udc *udc = ep->udc;
+	struct usba_request *req = to_usba_req(_req);
+	unsigned long flags;
+	u32 status;
+
+	DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
+			ep->ep.name, req);
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	if (req->using_dma) {
+		/*
+		 * If this request is currently being transferred,
+		 * stop the DMA controller and reset the FIFO.
+		 */
+		if (ep->queue.next == &req->queue) {
+			status = usba_dma_readl(ep, STATUS);
+			if (status & USBA_DMA_CH_EN)
+				stop_dma(ep, &status);
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+			ep->last_dma_status = status;
+#endif
+
+			usba_writel(udc, EPT_RST, 1 << ep->index);
+
+			usba_update_req(ep, req, status);
+		}
+	}
+
+	/*
+	 * Errors should stop the queue from advancing until the
+	 * completion function returns.
+	 */
+	list_del_init(&req->queue);
+
+	request_complete(ep, req, -ECONNRESET);
+
+	/* Process the next request if any */
+	submit_next_request(ep);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
+static int usba_ep_set_halt(struct usb_ep *_ep, int value)
+{
+	struct usba_ep *ep = to_usba_ep(_ep);
+	struct usba_udc *udc = ep->udc;
+	unsigned long flags;
+	int ret = 0;
+
+	DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name,
+			value ? "set" : "clear");
+
+	if (!ep->desc) {
+		DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
+				ep->ep.name);
+		return -ENODEV;
+	}
+	if (ep->is_isoc) {
+		DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
+				ep->ep.name);
+		return -ENOTTY;
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/*
+	 * We can't halt IN endpoints while there is still data to be
+	 * transferred
+	 */
+	if (!list_empty(&ep->queue)
+			|| ((value && ep->is_in && (usba_ep_readl(ep, STA)
+					& USBA_BF(BUSY_BANKS, -1L))))) {
+		ret = -EAGAIN;
+	} else {
+		if (value)
+			usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
+		else
+			usba_ep_writel(ep, CLR_STA,
+					USBA_FORCE_STALL | USBA_TOGGLE_CLR);
+		usba_ep_readl(ep, STA);
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return ret;
+}
+
+static int usba_ep_fifo_status(struct usb_ep *_ep)
+{
+	struct usba_ep *ep = to_usba_ep(_ep);
+
+	return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
+}
+
+static void usba_ep_fifo_flush(struct usb_ep *_ep)
+{
+	struct usba_ep *ep = to_usba_ep(_ep);
+	struct usba_udc *udc = ep->udc;
+
+	usba_writel(udc, EPT_RST, 1 << ep->index);
+}
+
+static const struct usb_ep_ops usba_ep_ops = {
+	.enable		= usba_ep_enable,
+	.disable	= usba_ep_disable,
+	.alloc_request	= usba_ep_alloc_request,
+	.free_request	= usba_ep_free_request,
+	.queue		= usba_ep_queue,
+	.dequeue	= usba_ep_dequeue,
+	.set_halt	= usba_ep_set_halt,
+	.fifo_status	= usba_ep_fifo_status,
+	.fifo_flush	= usba_ep_fifo_flush,
+};
+
+static int usba_udc_get_frame(struct usb_gadget *gadget)
+{
+	struct usba_udc *udc = to_usba_udc(gadget);
+
+	return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM));
+}
+
+static int usba_udc_wakeup(struct usb_gadget *gadget)
+{
+	struct usba_udc *udc = to_usba_udc(gadget);
+	unsigned long flags;
+	u32 ctrl;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
+		ctrl = usba_readl(udc, CTRL);
+		usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP);
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return ret;
+}
+
+static int
+usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
+{
+	struct usba_udc *udc = to_usba_udc(gadget);
+	unsigned long flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	if (is_selfpowered)
+		udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
+	else
+		udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
+static int atmel_usba_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *));
+static int atmel_usba_stop(struct usb_gadget_driver *driver);
+
+static const struct usb_gadget_ops usba_udc_ops = {
+	.get_frame		= usba_udc_get_frame,
+	.wakeup			= usba_udc_wakeup,
+	.set_selfpowered	= usba_udc_set_selfpowered,
+	.start			= atmel_usba_start,
+	.stop			= atmel_usba_stop,
+};
+
+static struct usb_endpoint_descriptor usba_ep0_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = 0,
+	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize = cpu_to_le16(64),
+	/* FIXME: I have no idea what to put here */
+	.bInterval = 1,
+};
+
+static void nop_release(struct device *dev)
+{
+
+}
+
+static struct usba_udc the_udc = {
+	.gadget	= {
+		.ops		= &usba_udc_ops,
+		.ep_list	= LIST_HEAD_INIT(the_udc.gadget.ep_list),
+		.max_speed	= USB_SPEED_HIGH,
+		.name		= "atmel_usba_udc",
+		.dev	= {
+			.init_name	= "gadget",
+			.release	= nop_release,
+		},
+	},
+};
+
+/*
+ * Called with interrupts disabled and udc->lock held.
+ */
+static void reset_all_endpoints(struct usba_udc *udc)
+{
+	struct usba_ep *ep;
+	struct usba_request *req, *tmp_req;
+
+	usba_writel(udc, EPT_RST, ~0UL);
+
+	ep = to_usba_ep(udc->gadget.ep0);
+	list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
+		list_del_init(&req->queue);
+		request_complete(ep, req, -ECONNRESET);
+	}
+
+	/* NOTE:  normally, the next call to the gadget driver is in
+	 * charge of disabling endpoints... usually disconnect().
+	 * The exception would be entering a high speed test mode.
+	 *
+	 * FIXME remove this code ... and retest thoroughly.
+	 */
+	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
+		if (ep->desc) {
+			spin_unlock(&udc->lock);
+			usba_ep_disable(&ep->ep);
+			spin_lock(&udc->lock);
+		}
+	}
+}
+
+static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
+{
+	struct usba_ep *ep;
+
+	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
+		return to_usba_ep(udc->gadget.ep0);
+
+	list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
+		u8 bEndpointAddress;
+
+		if (!ep->desc)
+			continue;
+		bEndpointAddress = ep->desc->bEndpointAddress;
+		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
+			continue;
+		if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
+				== (wIndex & USB_ENDPOINT_NUMBER_MASK))
+			return ep;
+	}
+
+	return NULL;
+}
+
+/* Called with interrupts disabled and udc->lock held */
+static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
+{
+	usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
+	ep->state = WAIT_FOR_SETUP;
+}
+
+static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep)
+{
+	if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL)
+		return 1;
+	return 0;
+}
+
+static inline void set_address(struct usba_udc *udc, unsigned int addr)
+{
+	u32 regval;
+
+	DBG(DBG_BUS, "setting address %u...\n", addr);
+	regval = usba_readl(udc, CTRL);
+	regval = USBA_BFINS(DEV_ADDR, addr, regval);
+	usba_writel(udc, CTRL, regval);
+}
+
+static int do_test_mode(struct usba_udc *udc)
+{
+	static const char test_packet_buffer[] = {
+		/* JKJKJKJK * 9 */
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		/* JJKKJJKK * 8 */
+		0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+		/* JJJJKKKK * 8 */
+		0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
+		/* JJJJJJJKKKKKKK * 8 */
+		0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		/* JJJJJJJK * 8 */
+		0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
+		/* {JKKKKKKK * 10}, JK */
+		0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
+	};
+	struct usba_ep *ep;
+	struct device *dev = &udc->pdev->dev;
+	int test_mode;
+
+	test_mode = udc->test_mode;
+
+	/* Start from a clean slate */
+	reset_all_endpoints(udc);
+
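+	/*
+	 * test_mode holds the wIndex of the SET_FEATURE(TEST_MODE) request;
+	 * the test selector sits in the high byte (Test_J = 1, Test_K = 2,
+	 * Test_SE0_NAK = 3, Test_Packet = 4), hence the 0x0n00 case values.
+	 */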
+	switch (test_mode) {
+	case 0x0100:
+		/* Test_J */
+		usba_writel(udc, TST, USBA_TST_J_MODE);
+		dev_info(dev, "Entering Test_J mode...\n");
+		break;
+	case 0x0200:
+		/* Test_K */
+		usba_writel(udc, TST, USBA_TST_K_MODE);
+		dev_info(dev, "Entering Test_K mode...\n");
+		break;
+	case 0x0300:
+		/*
+		 * Test_SE0_NAK: Force high-speed mode and set up ep0
+		 * for Bulk IN transfers
+		 */
+		ep = &usba_ep[0];
+		usba_writel(udc, TST,
+				USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
+		usba_ep_writel(ep, CFG,
+				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
+				| USBA_EPT_DIR_IN
+				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
+				| USBA_BF(BK_NUMBER, 1));
+		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
+			set_protocol_stall(udc, ep);
+			dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n");
+		} else {
+			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
+			dev_info(dev, "Entering Test_SE0_NAK mode...\n");
+		}
+		break;
+	case 0x0400:
+		/* Test_Packet */
+		ep = &usba_ep[0];
+		usba_ep_writel(ep, CFG,
+				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
+				| USBA_EPT_DIR_IN
+				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
+				| USBA_BF(BK_NUMBER, 1));
+		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
+			set_protocol_stall(udc, ep);
+			dev_err(dev, "Test_Packet: ep0 not mapped\n");
+		} else {
+			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
+			usba_writel(udc, TST, USBA_TST_PKT_MODE);
+			memcpy_toio(ep->fifo, test_packet_buffer,
+					sizeof(test_packet_buffer));
+			usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
+			dev_info(dev, "Entering Test_Packet mode...\n");
+		}
+		break;
+	default:
+		dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Avoid overly long expressions */
+static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq)
+{
+	if (crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
+		return true;
+	return false;
+}
+
+static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq)
+{
+	if (crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE))
+		return true;
+	return false;
+}
+
+static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
+{
+	if (crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT))
+		return true;
+	return false;
+}
+
+static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
+		struct usb_ctrlrequest *crq)
+{
+	int retval = 0;
+
+	switch (crq->bRequest) {
+	case USB_REQ_GET_STATUS: {
+		u16 status;
+
+		if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
+			status = cpu_to_le16(udc->devstatus);
+		} else if (crq->bRequestType
+				== (USB_DIR_IN | USB_RECIP_INTERFACE)) {
+			status = cpu_to_le16(0);
+		} else if (crq->bRequestType
+				== (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
+			struct usba_ep *target;
+
+			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
+			if (!target)
+				goto stall;
+
+			status = 0;
+			if (is_stalled(udc, target))
+				status |= cpu_to_le16(1);
+		} else
+			goto delegate;
+
+		/* Write directly to the FIFO. No queueing is done. */
+		if (crq->wLength != cpu_to_le16(sizeof(status)))
+			goto stall;
+		ep->state = DATA_STAGE_IN;
+		__raw_writew(status, ep->fifo);
+		usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
+		break;
+	}
+
+	case USB_REQ_CLEAR_FEATURE: {
+		if (crq->bRequestType == USB_RECIP_DEVICE) {
+			if (feature_is_dev_remote_wakeup(crq))
+				udc->devstatus
+					&= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
+			else
+				/* Can't CLEAR_FEATURE TEST_MODE */
+				goto stall;
+		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
+			struct usba_ep *target;
+
+			if (crq->wLength != cpu_to_le16(0)
+					|| !feature_is_ep_halt(crq))
+				goto stall;
+			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
+			if (!target)
+				goto stall;
+
+			usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
+			if (target->index != 0)
+				usba_ep_writel(target, CLR_STA,
+						USBA_TOGGLE_CLR);
+		} else {
+			goto delegate;
+		}
+
+		send_status(udc, ep);
+		break;
+	}
+
+	case USB_REQ_SET_FEATURE: {
+		if (crq->bRequestType == USB_RECIP_DEVICE) {
+			if (feature_is_dev_test_mode(crq)) {
+				send_status(udc, ep);
+				ep->state = STATUS_STAGE_TEST;
+				udc->test_mode = le16_to_cpu(crq->wIndex);
+				return 0;
+			} else if (feature_is_dev_remote_wakeup(crq)) {
+				udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
+			} else {
+				goto stall;
+			}
+		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
+			struct usba_ep *target;
+
+			if (crq->wLength != cpu_to_le16(0)
+					|| !feature_is_ep_halt(crq))
+				goto stall;
+
+			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
+			if (!target)
+				goto stall;
+
+			usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
+		} else
+			goto delegate;
+
+		send_status(udc, ep);
+		break;
+	}
+
+	case USB_REQ_SET_ADDRESS:
+		if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
+			goto delegate;
+
+		set_address(udc, le16_to_cpu(crq->wValue));
+		send_status(udc, ep);
+		ep->state = STATUS_STAGE_ADDR;
+		break;
+
+	default:
+delegate:
+		spin_unlock(&udc->lock);
+		retval = udc->driver->setup(&udc->gadget, crq);
+		spin_lock(&udc->lock);
+	}
+
+	return retval;
+
+stall:
+	pr_err("udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
+		"halting endpoint...\n",
+		ep->ep.name, crq->bRequestType, crq->bRequest,
+		le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
+		le16_to_cpu(crq->wLength));
+	set_protocol_stall(udc, ep);
+	return -1;
+}
+
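+/*
+ * ep0 state machine, driven from the control endpoint interrupt:
+ * WAIT_FOR_SETUP -> DATA_STAGE_IN or DATA_STAGE_OUT -> STATUS_STAGE_OUT or
+ * STATUS_STAGE_IN -> WAIT_FOR_SETUP.  SET_ADDRESS and SET_FEATURE(TEST_MODE)
+ * use the dedicated STATUS_STAGE_ADDR and STATUS_STAGE_TEST states so their
+ * side effects are applied only after the status stage completes.
+ */
+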
+static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
+{
+	struct usba_request *req;
+	u32 epstatus;
+	u32 epctrl;
+
+restart:
+	epstatus = usba_ep_readl(ep, STA);
+	epctrl = usba_ep_readl(ep, CTL);
+
+	DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
+			ep->ep.name, ep->state, epstatus, epctrl);
+
+	req = NULL;
+	if (!list_empty(&ep->queue))
+		req = list_entry(ep->queue.next,
+				 struct usba_request, queue);
+
+	if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
+		if (req->submitted)
+			next_fifo_transaction(ep, req);
+		else
+			submit_request(ep, req);
+
+		if (req->last_transaction) {
+			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
+			usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
+		}
+		goto restart;
+	}
+	if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
+		usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);
+
+		switch (ep->state) {
+		case DATA_STAGE_IN:
+			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
+			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+			ep->state = STATUS_STAGE_OUT;
+			break;
+		case STATUS_STAGE_ADDR:
+			/* Activate our new address */
+			usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
+						| USBA_FADDR_EN));
+			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+			ep->state = WAIT_FOR_SETUP;
+			break;
+		case STATUS_STAGE_IN:
+			if (req) {
+				list_del_init(&req->queue);
+				request_complete(ep, req, 0);
+				submit_next_request(ep);
+			}
+			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+			ep->state = WAIT_FOR_SETUP;
+			break;
+		case STATUS_STAGE_TEST:
+			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+			ep->state = WAIT_FOR_SETUP;
+			if (do_test_mode(udc))
+				set_protocol_stall(udc, ep);
+			break;
+		default:
+			pr_err("udc: %s: TXCOMP: Invalid endpoint state %d, "
+				"halting endpoint...\n",
+				ep->ep.name, ep->state);
+			set_protocol_stall(udc, ep);
+			break;
+		}
+
+		goto restart;
+	}
+	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
+		switch (ep->state) {
+		case STATUS_STAGE_OUT:
+			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
+			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
+
+			if (req) {
+				list_del_init(&req->queue);
+				request_complete(ep, req, 0);
+			}
+			ep->state = WAIT_FOR_SETUP;
+			break;
+
+		case DATA_STAGE_OUT:
+			receive_data(ep);
+			break;
+
+		default:
+			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
+			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
+			pr_err("udc: %s: RXRDY: Invalid endpoint state %d, "
+				"halting endpoint...\n",
+				ep->ep.name, ep->state);
+			set_protocol_stall(udc, ep);
+			break;
+		}
+
+		goto restart;
+	}
+	if (epstatus & USBA_RX_SETUP) {
+		union {
+			struct usb_ctrlrequest crq;
+			unsigned long data[2];
+		} crq;
+		unsigned int pkt_len;
+		int ret;
+
+		if (ep->state != WAIT_FOR_SETUP) {
+			/*
+			 * Didn't expect a SETUP packet at this
+			 * point. Clean up any pending requests (which
+			 * may be successful).
+			 */
+			int status = -EPROTO;
+
+			/*
+			 * RXRDY and TXCOMP are dropped when SETUP
+			 * packets arrive.  Just pretend we received
+			 * the status packet.
+			 */
+			if (ep->state == STATUS_STAGE_OUT
+					|| ep->state == STATUS_STAGE_IN) {
+				usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
+				status = 0;
+			}
+
+			if (req) {
+				list_del_init(&req->queue);
+				request_complete(ep, req, status);
+			}
+		}
+
+		pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
+		DBG(DBG_HW, "Packet length: %u\n", pkt_len);
+		if (pkt_len != sizeof(crq)) {
+			pr_warning("udc: Invalid packet length %u "
+				"(expected %zu)\n", pkt_len, sizeof(crq));
+			set_protocol_stall(udc, ep);
+			return;
+		}
+
+		DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
+		memcpy_fromio(crq.data, ep->fifo, sizeof(crq));
+
+		/* Free up one bank in the FIFO so that we can
+		 * generate or receive a reply right away. */
+		usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);
+
+		/* printk(KERN_DEBUG "setup: %d: %02x.%02x\n",
+			ep->state, crq.crq.bRequestType,
+			crq.crq.bRequest); */
+
+		if (crq.crq.bRequestType & USB_DIR_IN) {
+			/*
+			 * The USB 2.0 spec states that "if wLength is
+			 * zero, there is no data transfer phase."
+			 * However, testusb #14 seems to actually
+			 * expect a data phase even if wLength = 0...
+			 */
+			ep->state = DATA_STAGE_IN;
+		} else {
+			if (crq.crq.wLength != cpu_to_le16(0))
+				ep->state = DATA_STAGE_OUT;
+			else
+				ep->state = STATUS_STAGE_IN;
+		}
+
+		ret = -1;
+		if (ep->index == 0)
+			ret = handle_ep0_setup(udc, ep, &crq.crq);
+		else {
+			spin_unlock(&udc->lock);
+			ret = udc->driver->setup(&udc->gadget, &crq.crq);
+			spin_lock(&udc->lock);
+		}
+
+		DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
+			crq.crq.bRequestType, crq.crq.bRequest,
+			le16_to_cpu(crq.crq.wLength), ep->state, ret);
+
+		if (ret < 0) {
+			/* Let the host know that we failed */
+			set_protocol_stall(udc, ep);
+		}
+	}
+}
+
+static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
+{
+	struct usba_request *req;
+	u32 epstatus;
+	u32 epctrl;
+
+	epstatus = usba_ep_readl(ep, STA);
+	epctrl = usba_ep_readl(ep, CTL);
+
+	DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);
+
+	while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
+		DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);
+
+		if (list_empty(&ep->queue)) {
+			dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n");
+			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
+			return;
+		}
+
+		req = list_entry(ep->queue.next, struct usba_request, queue);
+
+		if (req->using_dma) {
+			/* Send a zero-length packet */
+			usba_ep_writel(ep, SET_STA,
+					USBA_TX_PK_RDY);
+			usba_ep_writel(ep, CTL_DIS,
+					USBA_TX_PK_RDY);
+			list_del_init(&req->queue);
+			submit_next_request(ep);
+			request_complete(ep, req, 0);
+		} else {
+			if (req->submitted)
+				next_fifo_transaction(ep, req);
+			else
+				submit_request(ep, req);
+
+			if (req->last_transaction) {
+				list_del_init(&req->queue);
+				submit_next_request(ep);
+				request_complete(ep, req, 0);
+			}
+		}
+
+		epstatus = usba_ep_readl(ep, STA);
+		epctrl = usba_ep_readl(ep, CTL);
+	}
+	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
+		DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
+		receive_data(ep);
+		usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
+	}
+}
+
+static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
+{
+	struct usba_request *req;
+	u32 status, control, pending;
+
+	status = usba_dma_readl(ep, STATUS);
+	control = usba_dma_readl(ep, CONTROL);
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+	ep->last_dma_status = status;
+#endif
+	pending = status & control;
+	DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control);
+
+	if (status & USBA_DMA_CH_EN) {
+		dev_err(&udc->pdev->dev,
+			"DMA_CH_EN is set after transfer is finished!\n");
+		dev_err(&udc->pdev->dev,
+			"status=%#08x, pending=%#08x, control=%#08x\n",
+			status, pending, control);
+
+		/*
+		 * try to pretend nothing happened. We might have to
+		 * do something here...
+		 */
+	}
+
+	if (list_empty(&ep->queue))
+		/* Might happen if a reset comes along at the right moment */
+		return;
+
+	if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) {
+		req = list_entry(ep->queue.next, struct usba_request, queue);
+		usba_update_req(ep, req, status);
+
+		list_del_init(&req->queue);
+		submit_next_request(ep);
+		request_complete(ep, req, 0);
+	}
+}
+
+static irqreturn_t usba_udc_irq(int irq, void *devid)
+{
+	struct usba_udc *udc = devid;
+	u32 status;
+	u32 dma_status;
+	u32 ep_status;
+
+	spin_lock(&udc->lock);
+
+	status = usba_readl(udc, INT_STA);
+	DBG(DBG_INT, "irq, status=%#08x\n", status);
+
+	if (status & USBA_DET_SUSPEND) {
+		toggle_bias(0);
+		usba_writel(udc, INT_CLR, USBA_DET_SUSPEND);
+		DBG(DBG_BUS, "Suspend detected\n");
+		if (udc->gadget.speed != USB_SPEED_UNKNOWN
+				&& udc->driver && udc->driver->suspend) {
+			spin_unlock(&udc->lock);
+			udc->driver->suspend(&udc->gadget);
+			spin_lock(&udc->lock);
+		}
+	}
+
+	if (status & USBA_WAKE_UP) {
+		toggle_bias(1);
+		usba_writel(udc, INT_CLR, USBA_WAKE_UP);
+		DBG(DBG_BUS, "Wake Up CPU detected\n");
+	}
+
+	if (status & USBA_END_OF_RESUME) {
+		usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
+		DBG(DBG_BUS, "Resume detected\n");
+		if (udc->gadget.speed != USB_SPEED_UNKNOWN
+				&& udc->driver && udc->driver->resume) {
+			spin_unlock(&udc->lock);
+			udc->driver->resume(&udc->gadget);
+			spin_lock(&udc->lock);
+		}
+	}
+
+	dma_status = USBA_BFEXT(DMA_INT, status);
+	if (dma_status) {
+		int i;
+
+		for (i = 1; i < USBA_NR_ENDPOINTS; i++)
+			if (dma_status & (1 << i))
+				usba_dma_irq(udc, &usba_ep[i]);
+	}
+
+	ep_status = USBA_BFEXT(EPT_INT, status);
+	if (ep_status) {
+		int i;
+
+		for (i = 0; i < USBA_NR_ENDPOINTS; i++)
+			if (ep_status & (1 << i)) {
+				if (ep_is_control(&usba_ep[i]))
+					usba_control_irq(udc, &usba_ep[i]);
+				else
+					usba_ep_irq(udc, &usba_ep[i]);
+			}
+	}
+
+	if (status & USBA_END_OF_RESET) {
+		struct usba_ep *ep0;
+
+		usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
+		reset_all_endpoints(udc);
+
+		if (udc->gadget.speed != USB_SPEED_UNKNOWN
+				&& udc->driver->disconnect) {
+			udc->gadget.speed = USB_SPEED_UNKNOWN;
+			spin_unlock(&udc->lock);
+			udc->driver->disconnect(&udc->gadget);
+			spin_lock(&udc->lock);
+		}
+
+		if (status & USBA_HIGH_SPEED)
+			udc->gadget.speed = USB_SPEED_HIGH;
+		else
+			udc->gadget.speed = USB_SPEED_FULL;
+		DBG(DBG_BUS, "%s bus reset detected\n",
+		    usb_speed_string(udc->gadget.speed));
+
+		ep0 = &usba_ep[0];
+		ep0->desc = &usba_ep0_desc;
+		ep0->state = WAIT_FOR_SETUP;
+		usba_ep_writel(ep0, CFG,
+				(USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
+				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
+				| USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
+		usba_ep_writel(ep0, CTL_ENB,
+				USBA_EPT_ENABLE | USBA_RX_SETUP);
+		usba_writel(udc, INT_ENB,
+				(usba_readl(udc, INT_ENB)
+				| USBA_BF(EPT_INT, 1)
+				| USBA_DET_SUSPEND
+				| USBA_END_OF_RESUME));
+
+		/*
+		 * Unclear why we hit this irregularly, e.g. in usbtest,
+		 * but it's clearly harmless...
+		 */
+		if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
+			dev_dbg(&udc->pdev->dev,
+				 "ODD: EP0 configuration is invalid!\n");
+	}
+
+	spin_unlock(&udc->lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t usba_vbus_irq(int irq, void *devid)
+{
+	struct usba_udc *udc = devid;
+	int vbus;
+
+	/* debounce */
+	udelay(10);
+
+	spin_lock(&udc->lock);
+
+	/* May happen if Vbus pin toggles during probe() */
+	if (!udc->driver)
+		goto out;
+
+	vbus = vbus_is_present(udc);
+	if (vbus != udc->vbus_prev) {
+		if (vbus) {
+			toggle_bias(1);
+			usba_writel(udc, CTRL, USBA_ENABLE_MASK);
+			usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
+		} else {
+			udc->gadget.speed = USB_SPEED_UNKNOWN;
+			reset_all_endpoints(udc);
+			toggle_bias(0);
+			usba_writel(udc, CTRL, USBA_DISABLE_MASK);
+			if (udc->driver->disconnect) {
+				spin_unlock(&udc->lock);
+				udc->driver->disconnect(&udc->gadget);
+				spin_lock(&udc->lock);
+			}
+		}
+		udc->vbus_prev = vbus;
+	}
+
+out:
+	spin_unlock(&udc->lock);
+
+	return IRQ_HANDLED;
+}
+
+static int atmel_usba_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct usba_udc *udc = &the_udc;
+	unsigned long flags;
+	int ret;
+
+	if (!udc->pdev)
+		return -ENODEV;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	if (udc->driver) {
+		spin_unlock_irqrestore(&udc->lock, flags);
+		return -EBUSY;
+	}
+
+	udc->devstatus = 1 << USB_DEVICE_SELF_POWERED;
+	udc->driver = driver;
+	udc->gadget.dev.driver = &driver->driver;
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	clk_enable(udc->pclk);
+	clk_enable(udc->hclk);
+
+	ret = bind(&udc->gadget);
+	if (ret) {
+		DBG(DBG_ERR, "Could not bind to driver %s: error %d\n",
+			driver->driver.name, ret);
+		goto err_driver_bind;
+	}
+
+	DBG(DBG_GADGET, "registered driver `%s'\n", driver->driver.name);
+
+	udc->vbus_prev = 0;
+	if (gpio_is_valid(udc->vbus_pin))
+		enable_irq(gpio_to_irq(udc->vbus_pin));
+
+	/* If Vbus is present, enable the controller and wait for reset */
+	spin_lock_irqsave(&udc->lock, flags);
+	if (vbus_is_present(udc) && udc->vbus_prev == 0) {
+		toggle_bias(1);
+		usba_writel(udc, CTRL, USBA_ENABLE_MASK);
+		usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
+	}
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+
+err_driver_bind:
+	udc->driver = NULL;
+	udc->gadget.dev.driver = NULL;
+	return ret;
+}
+
+static int atmel_usba_stop(struct usb_gadget_driver *driver)
+{
+	struct usba_udc *udc = &the_udc;
+	unsigned long flags;
+
+	if (!udc->pdev)
+		return -ENODEV;
+	if (driver != udc->driver || !driver->unbind)
+		return -EINVAL;
+
+	if (gpio_is_valid(udc->vbus_pin))
+		disable_irq(gpio_to_irq(udc->vbus_pin));
+
+	spin_lock_irqsave(&udc->lock, flags);
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	reset_all_endpoints(udc);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	/* This will also disable the DP pullup */
+	toggle_bias(0);
+	usba_writel(udc, CTRL, USBA_DISABLE_MASK);
+
+	if (udc->driver->disconnect)
+		udc->driver->disconnect(&udc->gadget);
+
+	driver->unbind(&udc->gadget);
+	udc->gadget.dev.driver = NULL;
+	udc->driver = NULL;
+
+	clk_disable(udc->hclk);
+	clk_disable(udc->pclk);
+
+	DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);
+
+	return 0;
+}
+
+static int __init usba_udc_probe(struct platform_device *pdev)
+{
+	struct usba_platform_data *pdata = pdev->dev.platform_data;
+	struct resource *regs, *fifo;
+	struct clk *pclk, *hclk;
+	struct usba_udc *udc = &the_udc;
+	int irq, ret, i;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
+	fifo = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
+	if (!regs || !fifo || !pdata)
+		return -ENXIO;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	pclk = clk_get(&pdev->dev, "pclk");
+	if (IS_ERR(pclk))
+		return PTR_ERR(pclk);
+	hclk = clk_get(&pdev->dev, "hclk");
+	if (IS_ERR(hclk)) {
+		ret = PTR_ERR(hclk);
+		goto err_get_hclk;
+	}
+
+	spin_lock_init(&udc->lock);
+	udc->pdev = pdev;
+	udc->pclk = pclk;
+	udc->hclk = hclk;
+	udc->vbus_pin = -ENODEV;
+
+	ret = -ENOMEM;
+	udc->regs = ioremap(regs->start, resource_size(regs));
+	if (!udc->regs) {
+		dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
+		goto err_map_regs;
+	}
+	dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
+		 (unsigned long)regs->start, udc->regs);
+	udc->fifo = ioremap(fifo->start, resource_size(fifo));
+	if (!udc->fifo) {
+		dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
+		goto err_map_fifo;
+	}
+	dev_info(&pdev->dev, "FIFO at 0x%08lx mapped at %p\n",
+		 (unsigned long)fifo->start, udc->fifo);
+
+	device_initialize(&udc->gadget.dev);
+	udc->gadget.dev.parent = &pdev->dev;
+	udc->gadget.dev.dma_mask = pdev->dev.dma_mask;
+
+	platform_set_drvdata(pdev, udc);
+
+	/* Make sure we start from a clean slate */
+	clk_enable(pclk);
+	toggle_bias(0);
+	usba_writel(udc, CTRL, USBA_DISABLE_MASK);
+	clk_disable(pclk);
+
+	usba_ep = kzalloc(sizeof(struct usba_ep) * pdata->num_ep,
+			  GFP_KERNEL);
+	if (!usba_ep)
+		goto err_alloc_ep;
+
+	the_udc.gadget.ep0 = &usba_ep[0].ep;
+
+	INIT_LIST_HEAD(&usba_ep[0].ep.ep_list);
+	usba_ep[0].ep_regs = udc->regs + USBA_EPT_BASE(0);
+	usba_ep[0].dma_regs = udc->regs + USBA_DMA_BASE(0);
+	usba_ep[0].fifo = udc->fifo + USBA_FIFO_BASE(0);
+	usba_ep[0].ep.ops = &usba_ep_ops;
+	usba_ep[0].ep.name = pdata->ep[0].name;
+	usba_ep[0].ep.maxpacket = pdata->ep[0].fifo_size;
+	usba_ep[0].udc = &the_udc;
+	INIT_LIST_HEAD(&usba_ep[0].queue);
+	usba_ep[0].fifo_size = pdata->ep[0].fifo_size;
+	usba_ep[0].nr_banks = pdata->ep[0].nr_banks;
+	usba_ep[0].index = pdata->ep[0].index;
+	usba_ep[0].can_dma = pdata->ep[0].can_dma;
+	usba_ep[0].can_isoc = pdata->ep[0].can_isoc;
+
+	for (i = 1; i < pdata->num_ep; i++) {
+		struct usba_ep *ep = &usba_ep[i];
+
+		ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
+		ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
+		ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
+		ep->ep.ops = &usba_ep_ops;
+		ep->ep.name = pdata->ep[i].name;
+		ep->ep.maxpacket = pdata->ep[i].fifo_size;
+		ep->udc = &the_udc;
+		INIT_LIST_HEAD(&ep->queue);
+		ep->fifo_size = pdata->ep[i].fifo_size;
+		ep->nr_banks = pdata->ep[i].nr_banks;
+		ep->index = pdata->ep[i].index;
+		ep->can_dma = pdata->ep[i].can_dma;
+		ep->can_isoc = pdata->ep[i].can_isoc;
+
+		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+	}
+
+	ret = request_irq(irq, usba_udc_irq, 0, "atmel_usba_udc", udc);
+	if (ret) {
+		dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
+			irq, ret);
+		goto err_request_irq;
+	}
+	udc->irq = irq;
+
+	ret = device_add(&udc->gadget.dev);
+	if (ret) {
+		dev_dbg(&pdev->dev, "Could not add gadget: %d\n", ret);
+		goto err_device_add;
+	}
+
+	if (gpio_is_valid(pdata->vbus_pin)) {
+		if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) {
+			udc->vbus_pin = pdata->vbus_pin;
+			udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
+
+			ret = request_irq(gpio_to_irq(udc->vbus_pin),
+					usba_vbus_irq, 0,
+					"atmel_usba_udc", udc);
+			if (ret) {
+				gpio_free(udc->vbus_pin);
+				udc->vbus_pin = -ENODEV;
+				dev_warn(&udc->pdev->dev,
+					 "failed to request vbus irq; "
+					 "assuming always on\n");
+			} else {
+				disable_irq(gpio_to_irq(udc->vbus_pin));
+			}
+		} else {
+			/* gpio_request() failed; use -EINVAL so gpio_is_valid() fails */
+			udc->vbus_pin = -EINVAL;
+		}
+	}
+
+	ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+	if (ret)
+		goto err_add_udc;
+
+	usba_init_debugfs(udc);
+	for (i = 1; i < pdata->num_ep; i++)
+		usba_ep_init_debugfs(udc, &usba_ep[i]);
+
+	return 0;
+
+err_add_udc:
+	if (gpio_is_valid(pdata->vbus_pin)) {
+		free_irq(gpio_to_irq(udc->vbus_pin), udc);
+		gpio_free(udc->vbus_pin);
+	}
+
+	device_unregister(&udc->gadget.dev);
+
+err_device_add:
+	free_irq(irq, udc);
+err_request_irq:
+	kfree(usba_ep);
+err_alloc_ep:
+	iounmap(udc->fifo);
+err_map_fifo:
+	iounmap(udc->regs);
+err_map_regs:
+	clk_put(hclk);
+err_get_hclk:
+	clk_put(pclk);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return ret;
+}
+
+static int __exit usba_udc_remove(struct platform_device *pdev)
+{
+	struct usba_udc *udc;
+	int i;
+	struct usba_platform_data *pdata = pdev->dev.platform_data;
+
+	udc = platform_get_drvdata(pdev);
+
+	usb_del_gadget_udc(&udc->gadget);
+
+	for (i = 1; i < pdata->num_ep; i++)
+		usba_ep_cleanup_debugfs(&usba_ep[i]);
+	usba_cleanup_debugfs(udc);
+
+	if (gpio_is_valid(udc->vbus_pin)) {
+		free_irq(gpio_to_irq(udc->vbus_pin), udc);
+		gpio_free(udc->vbus_pin);
+	}
+
+	free_irq(udc->irq, udc);
+	kfree(usba_ep);
+	iounmap(udc->fifo);
+	iounmap(udc->regs);
+	clk_put(udc->hclk);
+	clk_put(udc->pclk);
+
+	device_unregister(&udc->gadget.dev);
+
+	return 0;
+}
+
+static struct platform_driver udc_driver = {
+	.remove		= __exit_p(usba_udc_remove),
+	.driver		= {
+		.name		= "atmel_usba_udc",
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int __init udc_init(void)
+{
+	return platform_driver_probe(&udc_driver, usba_udc_probe);
+}
+module_init(udc_init);
+
+static void __exit udc_exit(void)
+{
+	platform_driver_unregister(&udc_driver);
+}
+module_exit(udc_exit);
+
+MODULE_DESCRIPTION("Atmel USBA UDC driver");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:atmel_usba_udc");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/atmel_usba_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/atmel_usba_udc.h
new file mode 100644
index 0000000..88a2e07
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/atmel_usba_udc.h
@@ -0,0 +1,359 @@
+/*
+ * Driver for the Atmel USBA high speed USB device controller
+ *
+ * Copyright (C) 2005-2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_USB_GADGET_USBA_UDC_H__
+#define __LINUX_USB_GADGET_USBA_UDC_H__
+
+/* USB register offsets */
+#define USBA_CTRL				0x0000
+#define USBA_FNUM				0x0004
+#define USBA_INT_ENB				0x0010
+#define USBA_INT_STA				0x0014
+#define USBA_INT_CLR				0x0018
+#define USBA_EPT_RST				0x001c
+#define USBA_TST				0x00e0
+
+/* USB endpoint register offsets */
+#define USBA_EPT_CFG				0x0000
+#define USBA_EPT_CTL_ENB			0x0004
+#define USBA_EPT_CTL_DIS			0x0008
+#define USBA_EPT_CTL				0x000c
+#define USBA_EPT_SET_STA			0x0014
+#define USBA_EPT_CLR_STA			0x0018
+#define USBA_EPT_STA				0x001c
+
+/* USB DMA register offsets */
+#define USBA_DMA_NXT_DSC			0x0000
+#define USBA_DMA_ADDRESS			0x0004
+#define USBA_DMA_CONTROL			0x0008
+#define USBA_DMA_STATUS				0x000c
+
+/* Bitfields in CTRL */
+#define USBA_DEV_ADDR_OFFSET			0
+#define USBA_DEV_ADDR_SIZE			7
+#define USBA_FADDR_EN				(1 <<  7)
+#define USBA_EN_USBA				(1 <<  8)
+#define USBA_DETACH				(1 <<  9)
+#define USBA_REMOTE_WAKE_UP			(1 << 10)
+#define USBA_PULLD_DIS				(1 << 11)
+
+#if defined(CONFIG_AVR32)
+#define USBA_ENABLE_MASK			USBA_EN_USBA
+#define USBA_DISABLE_MASK			0
+#elif defined(CONFIG_ARCH_AT91)
+#define USBA_ENABLE_MASK			(USBA_EN_USBA | USBA_PULLD_DIS)
+#define USBA_DISABLE_MASK			USBA_DETACH
+#endif /* CONFIG_ARCH_AT91 */
+
+/* Bitfields in FNUM */
+#define USBA_MICRO_FRAME_NUM_OFFSET		0
+#define USBA_MICRO_FRAME_NUM_SIZE		3
+#define USBA_FRAME_NUMBER_OFFSET		3
+#define USBA_FRAME_NUMBER_SIZE			11
+#define USBA_FRAME_NUM_ERROR			(1 << 31)
+
+/* Bitfields in INT_ENB/INT_STA/INT_CLR */
+#define USBA_HIGH_SPEED				(1 <<  0)
+#define USBA_DET_SUSPEND			(1 <<  1)
+#define USBA_MICRO_SOF				(1 <<  2)
+#define USBA_SOF				(1 <<  3)
+#define USBA_END_OF_RESET			(1 <<  4)
+#define USBA_WAKE_UP				(1 <<  5)
+#define USBA_END_OF_RESUME			(1 <<  6)
+#define USBA_UPSTREAM_RESUME			(1 <<  7)
+#define USBA_EPT_INT_OFFSET			8
+#define USBA_EPT_INT_SIZE			16
+#define USBA_DMA_INT_OFFSET			24
+#define USBA_DMA_INT_SIZE			8
+
+/* Bitfields in EPT_RST */
+#define USBA_RST_OFFSET				0
+#define USBA_RST_SIZE				16
+
+/* Bitfields in USBA_TST */
+#define USBA_SPEED_CFG_OFFSET			0
+#define USBA_SPEED_CFG_SIZE			2
+#define USBA_TST_J_MODE				(1 <<  2)
+#define USBA_TST_K_MODE				(1 <<  3)
+#define USBA_TST_PKT_MODE			(1 <<  4)
+#define USBA_OPMODE2				(1 <<  5)
+
+/* Bitfields in EPT_CFG */
+#define USBA_EPT_SIZE_OFFSET			0
+#define USBA_EPT_SIZE_SIZE			3
+#define USBA_EPT_DIR_IN				(1 <<  3)
+#define USBA_EPT_TYPE_OFFSET			4
+#define USBA_EPT_TYPE_SIZE			2
+#define USBA_BK_NUMBER_OFFSET			6
+#define USBA_BK_NUMBER_SIZE			2
+#define USBA_NB_TRANS_OFFSET			8
+#define USBA_NB_TRANS_SIZE			2
+#define USBA_EPT_MAPPED				(1 << 31)
+
+/* Bitfields in EPT_CTL/EPT_CTL_ENB/EPT_CTL_DIS */
+#define USBA_EPT_ENABLE				(1 <<  0)
+#define USBA_AUTO_VALID				(1 <<  1)
+#define USBA_INTDIS_DMA				(1 <<  3)
+#define USBA_NYET_DIS				(1 <<  4)
+#define USBA_DATAX_RX				(1 <<  6)
+#define USBA_MDATA_RX				(1 <<  7)
+/* Bits 8-15 and 31 enable interrupts for respective bits in EPT_STA */
+#define USBA_BUSY_BANK_IE			(1 << 18)
+
+/* Bitfields in EPT_SET_STA/EPT_CLR_STA/EPT_STA */
+#define USBA_FORCE_STALL			(1 <<  5)
+#define USBA_TOGGLE_CLR				(1 <<  6)
+#define USBA_TOGGLE_SEQ_OFFSET			6
+#define USBA_TOGGLE_SEQ_SIZE			2
+#define USBA_ERR_OVFLW				(1 <<  8)
+#define USBA_RX_BK_RDY				(1 <<  9)
+#define USBA_KILL_BANK				(1 <<  9)
+#define USBA_TX_COMPLETE			(1 << 10)
+#define USBA_TX_PK_RDY				(1 << 11)
+#define USBA_ISO_ERR_TRANS			(1 << 11)
+#define USBA_RX_SETUP				(1 << 12)
+#define USBA_ISO_ERR_FLOW			(1 << 12)
+#define USBA_STALL_SENT				(1 << 13)
+#define USBA_ISO_ERR_CRC			(1 << 13)
+#define USBA_ISO_ERR_NBTRANS			(1 << 13)
+#define USBA_NAK_IN				(1 << 14)
+#define USBA_ISO_ERR_FLUSH			(1 << 14)
+#define USBA_NAK_OUT				(1 << 15)
+#define USBA_CURRENT_BANK_OFFSET		16
+#define USBA_CURRENT_BANK_SIZE			2
+#define USBA_BUSY_BANKS_OFFSET			18
+#define USBA_BUSY_BANKS_SIZE			2
+#define USBA_BYTE_COUNT_OFFSET			20
+#define USBA_BYTE_COUNT_SIZE			11
+#define USBA_SHORT_PACKET			(1 << 31)
+
+/* Bitfields in DMA_CONTROL */
+#define USBA_DMA_CH_EN				(1 <<  0)
+#define USBA_DMA_LINK				(1 <<  1)
+#define USBA_DMA_END_TR_EN			(1 <<  2)
+#define USBA_DMA_END_BUF_EN			(1 <<  3)
+#define USBA_DMA_END_TR_IE			(1 <<  4)
+#define USBA_DMA_END_BUF_IE			(1 <<  5)
+#define USBA_DMA_DESC_LOAD_IE			(1 <<  6)
+#define USBA_DMA_BURST_LOCK			(1 <<  7)
+#define USBA_DMA_BUF_LEN_OFFSET			16
+#define USBA_DMA_BUF_LEN_SIZE			16
+
+/* Bitfields in DMA_STATUS */
+#define USBA_DMA_CH_ACTIVE			(1 <<  1)
+#define USBA_DMA_END_TR_ST			(1 <<  4)
+#define USBA_DMA_END_BUF_ST			(1 <<  5)
+#define USBA_DMA_DESC_LOAD_ST			(1 <<  6)
+
+/* Constants for SPEED_CFG */
+#define USBA_SPEED_CFG_NORMAL			0
+#define USBA_SPEED_CFG_FORCE_HIGH		2
+#define USBA_SPEED_CFG_FORCE_FULL		3
+
+/* Constants for EPT_SIZE */
+#define USBA_EPT_SIZE_8				0
+#define USBA_EPT_SIZE_16			1
+#define USBA_EPT_SIZE_32			2
+#define USBA_EPT_SIZE_64			3
+#define USBA_EPT_SIZE_128			4
+#define USBA_EPT_SIZE_256			5
+#define USBA_EPT_SIZE_512			6
+#define USBA_EPT_SIZE_1024			7
+
+/* Constants for EPT_TYPE */
+#define USBA_EPT_TYPE_CONTROL			0
+#define USBA_EPT_TYPE_ISO			1
+#define USBA_EPT_TYPE_BULK			2
+#define USBA_EPT_TYPE_INT			3
+
+/* Constants for BK_NUMBER */
+#define USBA_BK_NUMBER_ZERO			0
+#define USBA_BK_NUMBER_ONE			1
+#define USBA_BK_NUMBER_DOUBLE			2
+#define USBA_BK_NUMBER_TRIPLE			3
+
+/* Bit manipulation macros */
+#define USBA_BF(name, value)					\
+	(((value) & ((1 << USBA_##name##_SIZE) - 1))		\
+	 << USBA_##name##_OFFSET)
+#define USBA_BFEXT(name, value)					\
+	(((value) >> USBA_##name##_OFFSET)			\
+	 & ((1 << USBA_##name##_SIZE) - 1))
+#define USBA_BFINS(name, value, old)				\
+	(((old) & ~(((1 << USBA_##name##_SIZE) - 1)		\
+		    << USBA_##name##_OFFSET))			\
+	 | USBA_BF(name, value))
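+
+/*
+ * Illustrative expansions (not used anywhere, shown only as a worked
+ * example of the macros above): with the INT_ENB/INT_STA bitfields,
+ * USBA_BF(DMA_INT, 1) evaluates to (1 & ((1 << 8) - 1)) << 24, i.e.
+ * 0x01000000, and USBA_BFEXT(EPT_INT, status) reduces to
+ * (status >> 8) & 0xffff, the per-endpoint interrupt bits.
+ * USBA_BFINS() clears the field in `old' first and then ORs in the
+ * replacement value.
+ */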
+
+/* Register access macros */
+#define usba_readl(udc, reg)					\
+	__raw_readl((udc)->regs + USBA_##reg)
+#define usba_writel(udc, reg, value)				\
+	__raw_writel((value), (udc)->regs + USBA_##reg)
+#define usba_ep_readl(ep, reg)					\
+	__raw_readl((ep)->ep_regs + USBA_EPT_##reg)
+#define usba_ep_writel(ep, reg, value)				\
+	__raw_writel((value), (ep)->ep_regs + USBA_EPT_##reg)
+#define usba_dma_readl(ep, reg)					\
+	__raw_readl((ep)->dma_regs + USBA_DMA_##reg)
+#define usba_dma_writel(ep, reg, value)				\
+	__raw_writel((value), (ep)->dma_regs + USBA_DMA_##reg)
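+
+/*
+ * For illustration only: a call such as
+ *	usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
+ * expands to
+ *	__raw_writel(USBA_EPT_ENABLE, (ep)->ep_regs + USBA_EPT_CTL_ENB);
+ * i.e. the `reg' argument is pasted onto the USBA_/USBA_EPT_/USBA_DMA_
+ * prefix to select one of the register offsets defined above.
+ */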
+
+/* Calculate base address for a given endpoint or DMA controller */
+#define USBA_EPT_BASE(x)	(0x100 + (x) * 0x20)
+#define USBA_DMA_BASE(x)	(0x300 + (x) * 0x10)
+#define USBA_FIFO_BASE(x)	((x) << 16)
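+
+/*
+ * Worked example (illustrative): for endpoint 3 these evaluate to
+ * USBA_EPT_BASE(3)  = 0x100 + 3 * 0x20 = 0x160,
+ * USBA_DMA_BASE(3)  = 0x300 + 3 * 0x10 = 0x330,
+ * USBA_FIFO_BASE(3) = 3 << 16          = 0x30000,
+ * which is how usba_udc_probe() derives ep_regs, dma_regs and fifo
+ * for each endpoint.
+ */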
+
+/* Synth parameters */
+#define USBA_NR_ENDPOINTS	7
+
+#define EP0_FIFO_SIZE		64
+#define EP0_EPT_SIZE		USBA_EPT_SIZE_64
+#define EP0_NR_BANKS		1
+
+/*
+ * REVISIT: Try to eliminate this value. Can we rely on req->mapped to
+ * provide this information?
+ */
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+#define FIFO_IOMEM_ID	0
+#define CTRL_IOMEM_ID	1
+
+#define DBG_ERR		0x0001	/* report all error returns */
+#define DBG_HW		0x0002	/* debug hardware initialization */
+#define DBG_GADGET	0x0004	/* calls to/from gadget driver */
+#define DBG_INT		0x0008	/* interrupts */
+#define DBG_BUS		0x0010	/* report changes in bus state */
+#define DBG_QUEUE	0x0020  /* debug request queue processing */
+#define DBG_FIFO	0x0040  /* debug FIFO contents */
+#define DBG_DMA		0x0080  /* debug DMA handling */
+#define DBG_REQ		0x0100	/* print out queued request length */
+#define DBG_ALL		0xffff
+#define DBG_NONE	0x0000
+
+#define DEBUG_LEVEL	(DBG_ERR)
+
+#define DBG(level, fmt, ...)					\
+	do {							\
+		if ((level) & DEBUG_LEVEL)			\
+			pr_debug("udc: " fmt, ## __VA_ARGS__);	\
+	} while (0)
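+
+/*
+ * Usage sketch (illustrative): with DEBUG_LEVEL left at DBG_ERR above,
+ *	DBG(DBG_QUEUE, "queued request\n");
+ * tests the constant expression (DBG_QUEUE & DBG_ERR) == 0, so the
+ * compiler can discard the pr_debug() call; only DBG(DBG_ERR, ...)
+ * messages are active unless DEBUG_LEVEL is widened.
+ */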
+
+enum usba_ctrl_state {
+	WAIT_FOR_SETUP,
+	DATA_STAGE_IN,
+	DATA_STAGE_OUT,
+	STATUS_STAGE_IN,
+	STATUS_STAGE_OUT,
+	STATUS_STAGE_ADDR,
+	STATUS_STAGE_TEST,
+};
+/*
+  EP_STATE_IDLE,
+  EP_STATE_SETUP,
+  EP_STATE_IN_DATA,
+  EP_STATE_OUT_DATA,
+  EP_STATE_SET_ADDR_STATUS,
+  EP_STATE_RX_STATUS,
+  EP_STATE_TX_STATUS,
+  EP_STATE_HALT,
+*/
+
+struct usba_dma_desc {
+	dma_addr_t next;
+	dma_addr_t addr;
+	u32 ctrl;
+};
+
+struct usba_ep {
+	int					state;
+	void __iomem				*ep_regs;
+	void __iomem				*dma_regs;
+	void __iomem				*fifo;
+	struct usb_ep				ep;
+	struct usba_udc				*udc;
+
+	struct list_head			queue;
+	const struct usb_endpoint_descriptor	*desc;
+
+	u16					fifo_size;
+	u8					nr_banks;
+	u8					index;
+	unsigned int				can_dma:1;
+	unsigned int				can_isoc:1;
+	unsigned int				is_isoc:1;
+	unsigned int				is_in:1;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+	u32					last_dma_status;
+	struct dentry				*debugfs_dir;
+	struct dentry				*debugfs_queue;
+	struct dentry				*debugfs_dma_status;
+	struct dentry				*debugfs_state;
+#endif
+};
+
+struct usba_request {
+	struct usb_request			req;
+	struct list_head			queue;
+
+	u32					ctrl;
+
+	unsigned int				submitted:1;
+	unsigned int				last_transaction:1;
+	unsigned int				using_dma:1;
+	unsigned int				mapped:1;
+};
+
+struct usba_udc {
+	/* Protect hw registers from concurrent modifications */
+	spinlock_t lock;
+
+	void __iomem *regs;
+	void __iomem *fifo;
+
+	struct usb_gadget gadget;
+	struct usb_gadget_driver *driver;
+	struct platform_device *pdev;
+	int irq;
+	int vbus_pin;
+	int vbus_pin_inverted;
+	struct clk *pclk;
+	struct clk *hclk;
+
+	u16 devstatus;
+
+	u16 test_mode;
+	int vbus_prev;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+	struct dentry *debugfs_root;
+	struct dentry *debugfs_regs;
+#endif
+};
+
+static inline struct usba_ep *to_usba_ep(struct usb_ep *ep)
+{
+	return container_of(ep, struct usba_ep, ep);
+}
+
+static inline struct usba_request *to_usba_req(struct usb_request *req)
+{
+	return container_of(req, struct usba_request, req);
+}
+
+static inline struct usba_udc *to_usba_udc(struct usb_gadget *gadget)
+{
+	return container_of(gadget, struct usba_udc, gadget);
+}
+
+#define ep_is_control(ep)	((ep)->index == 0)
+#define ep_is_idle(ep)		((ep)->state == EP_STATE_IDLE)
+
+#endif /* __LINUX_USB_GADGET_USBA_UDC_H__ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/audio.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/audio.c
new file mode 100644
index 0000000..9889924
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/audio.c
@@ -0,0 +1,224 @@
+/*
+ * audio.c -- Audio gadget driver
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+
+#define DRIVER_DESC		"Linux USB Audio Gadget"
+#define DRIVER_VERSION		"Feb 2, 2012"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer,
+	[STRING_PRODUCT_IDX].s = DRIVER_DESC,
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language = 0x0409,	/* en-us */
+	.strings = strings_dev,
+};
+
+static struct usb_gadget_strings *audio_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+#ifdef CONFIG_GADGET_UAC1
+#include "u_uac1.h"
+#include "u_uac1.c"
+#include "f_uac1.c"
+#else
+#include "f_uac2.c"
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+
+/* Thanks to Linux Foundation for donating this product ID. */
+#define AUDIO_VENDOR_NUM		0x1d6b	/* Linux Foundation */
+#define AUDIO_PRODUCT_NUM		0x0101	/* Linux-USB Audio Gadget */
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		__constant_cpu_to_le16(0x200),
+
+#ifdef CONFIG_GADGET_UAC1
+	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
+	.bDeviceSubClass =	0,
+	.bDeviceProtocol =	0,
+#else
+	.bDeviceClass =		USB_CLASS_MISC,
+	.bDeviceSubClass =	0x02,
+	.bDeviceProtocol =	0x01,
+#endif
+	/* .bMaxPacketSize0 = f(hardware) */
+
+	/* Vendor and product id defaults change according to what configs
+	 * we support.  (As does bNumConfigurations.)  These values can
+	 * also be overridden by module parameters.
+	 */
+	.idVendor =		__constant_cpu_to_le16(AUDIO_VENDOR_NUM),
+	.idProduct =		__constant_cpu_to_le16(AUDIO_PRODUCT_NUM),
+	/* .bcdDevice = f(hardware) */
+	/* .iManufacturer = DYNAMIC */
+	/* .iProduct = DYNAMIC */
+	/* NO SERIAL NUMBER */
+	.bNumConfigurations =	1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/* REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init audio_do_config(struct usb_configuration *c)
+{
+	/* FIXME alloc iConfiguration string, set it in c->strings */
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	audio_bind_config(c);
+
+	return 0;
+}
+
+static struct usb_configuration audio_config_driver = {
+	.label			= DRIVER_DESC,
+	.bConfigurationValue	= 1,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+#ifndef CONFIG_GADGET_UAC1
+	.unbind			= uac2_unbind_config,
+#endif
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init audio_bind(struct usb_composite_dev *cdev)
+{
+	int			gcnum;
+	int			status;
+
+	gcnum = usb_gadget_controller_number(cdev->gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+	else {
+		ERROR(cdev, "controller '%s' not recognized; trying %s\n",
+			cdev->gadget->name,
+			audio_config_driver.label);
+		device_desc.bcdDevice =
+			__constant_cpu_to_le16(0x0300 | 0x0099);
+	}
+
+	/* device descriptor strings: manufacturer, product */
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+		init_utsname()->sysname, init_utsname()->release,
+		cdev->gadget->name);
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	device_desc.iProduct = status;
+
+	status = usb_add_config(cdev, &audio_config_driver, audio_do_config);
+	if (status < 0)
+		goto fail;
+
+	INFO(cdev, "%s, version: %s\n", DRIVER_DESC, DRIVER_VERSION);
+	return 0;
+
+fail:
+	return status;
+}
+
+static int __exit audio_unbind(struct usb_composite_dev *cdev)
+{
+#ifdef CONFIG_GADGET_UAC1
+	gaudio_cleanup();
+#endif
+	return 0;
+}
+
+static struct usb_composite_driver audio_driver = {
+	.name		= "g_audio",
+	.dev		= &device_desc,
+	.strings	= audio_strings,
+	.max_speed	= USB_SPEED_HIGH,
+	.unbind		= __exit_p(audio_unbind),
+};
+
+static int __init init(void)
+{
+	return usb_composite_probe(&audio_driver, audio_bind);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&audio_driver);
+}
+module_exit(cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Bryan Wu <cooloney@kernel.org>");
+MODULE_LICENSE("GPL");
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/cdc2.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/cdc2.c
new file mode 100644
index 0000000..725550f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/cdc2.c
@@ -0,0 +1,257 @@
+/*
+ * cdc2.c -- CDC Composite driver, with ECM and ACM support
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/module.h>
+
+#include "u_ether.h"
+#include "u_serial.h"
+
+
+#define DRIVER_DESC		"CDC Composite Gadget"
+#define DRIVER_VERSION		"King Kamehameha Day 2008"
+
+/*-------------------------------------------------------------------------*/
+
+/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+
+/* Thanks to NetChip Technologies for donating this product ID.
+ * It's for devices with only this composite CDC configuration.
+ */
+#define CDC_VENDOR_NUM		0x0525	/* NetChip */
+#define CDC_PRODUCT_NUM		0xa4aa	/* CDC Composite: ECM + ACM */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "u_serial.c"
+#include "f_acm.c"
+#include "f_ecm.c"
+#include "u_ether.c"
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		cpu_to_le16(0x0200),
+
+	.bDeviceClass =		USB_CLASS_COMM,
+	.bDeviceSubClass =	0,
+	.bDeviceProtocol =	0,
+	/* .bMaxPacketSize0 = f(hardware) */
+
+	/* Vendor and product id can be overridden by module parameters.  */
+	.idVendor =		cpu_to_le16(CDC_VENDOR_NUM),
+	.idProduct =		cpu_to_le16(CDC_PRODUCT_NUM),
+	/* .bcdDevice = f(hardware) */
+	/* .iManufacturer = DYNAMIC */
+	/* .iProduct = DYNAMIC */
+	/* NO SERIAL NUMBER */
+	.bNumConfigurations =	1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/* REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer,
+	[STRING_PRODUCT_IDX].s = DRIVER_DESC,
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+static u8 hostaddr[ETH_ALEN];
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * We _always_ have both CDC ECM and CDC ACM functions.
+ */
+static int __init cdc_do_config(struct usb_configuration *c)
+{
+	int	status;
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	status = ecm_bind_config(c, hostaddr);
+	if (status < 0)
+		return status;
+
+	status = acm_bind_config(c, 0);
+	if (status < 0)
+		return status;
+
+	return 0;
+}
+
+static struct usb_configuration cdc_config_driver = {
+	.label			= "CDC Composite (ECM + ACM)",
+	.bConfigurationValue	= 1,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init cdc_bind(struct usb_composite_dev *cdev)
+{
+	int			gcnum;
+	struct usb_gadget	*gadget = cdev->gadget;
+	int			status;
+
+	if (!can_support_ecm(cdev->gadget)) {
+		dev_err(&gadget->dev, "controller '%s' not usable\n",
+				gadget->name);
+		return -EINVAL;
+	}
+
+	/* set up network link layer */
+	status = gether_setup(cdev->gadget, hostaddr);
+	if (status < 0)
+		return status;
+
+	/* set up serial link layer */
+	status = gserial_setup(cdev->gadget, 1);
+	if (status < 0)
+		goto fail0;
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+	else {
+		/* We assume that can_support_ecm() tells the truth;
+		 * but if the controller isn't recognized at all then
+		 * that assumption is a bit more likely to be wrong.
+		 */
+		WARNING(cdev, "controller '%s' not recognized; trying %s\n",
+				gadget->name,
+				cdc_config_driver.label);
+		device_desc.bcdDevice =
+			cpu_to_le16(0x0300 | 0x0099);
+	}
+
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+
+	/* device descriptor strings: manufacturer, product */
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+		init_utsname()->sysname, init_utsname()->release,
+		gadget->name);
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail1;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail1;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	device_desc.iProduct = status;
+
+	/* register our configuration */
+	status = usb_add_config(cdev, &cdc_config_driver, cdc_do_config);
+	if (status < 0)
+		goto fail1;
+
+	dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
+			DRIVER_DESC);
+
+	return 0;
+
+fail1:
+	gserial_cleanup();
+fail0:
+	gether_cleanup();
+	return status;
+}
+
+static int __exit cdc_unbind(struct usb_composite_dev *cdev)
+{
+	gserial_cleanup();
+	gether_cleanup();
+	return 0;
+}
+
+static struct usb_composite_driver cdc_driver = {
+	.name		= "g_cdc",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.max_speed	= USB_SPEED_HIGH,
+	.unbind		= __exit_p(cdc_unbind),
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("David Brownell");
+MODULE_LICENSE("GPL");
+
+static int __init init(void)
+{
+	return usb_composite_probe(&cdc_driver, cdc_bind);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&cdc_driver);
+}
+module_exit(cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ci13xxx_msm.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ci13xxx_msm.c
new file mode 100644
index 0000000..d07e44c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ci13xxx_msm.c
@@ -0,0 +1,126 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/msm_hsusb_hw.h>
+#include <linux/usb/ulpi.h>
+
+#include "ci13xxx_udc.c"
+
+#define MSM_USB_BASE	(udc->regs)
+
+static irqreturn_t msm_udc_irq(int irq, void *data)
+{
+	return udc_irq();
+}
+
+static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned event)
+{
+	struct device *dev = udc->gadget.dev.parent;
+	int val;
+
+	switch (event) {
+	case CI13XXX_CONTROLLER_RESET_EVENT:
+		dev_dbg(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
+		writel(0, USB_AHBBURST);
+		writel(0, USB_AHBMODE);
+		break;
+	case CI13XXX_CONTROLLER_STOPPED_EVENT:
+		dev_dbg(dev, "CI13XXX_CONTROLLER_STOPPED_EVENT received\n");
+		/*
+		 * Put the transceiver in non-driving mode. Otherwise host
+		 * may not detect soft-disconnection.
+		 */
+		val = usb_phy_io_read(udc->transceiver, ULPI_FUNC_CTRL);
+		val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+		val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+		usb_phy_io_write(udc->transceiver, val, ULPI_FUNC_CTRL);
+		break;
+	default:
+		dev_dbg(dev, "unknown ci13xxx_udc event\n");
+		break;
+	}
+}
+
+static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = {
+	.name			= "ci13xxx_msm",
+	.flags			= CI13XXX_REGS_SHARED |
+				  CI13XXX_REQUIRE_TRANSCEIVER |
+				  CI13XXX_PULLUP_ON_VBUS |
+				  CI13XXX_DISABLE_STREAMING,
+
+	.notify_event		= ci13xxx_msm_notify_event,
+};
+
+static int ci13xxx_msm_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	void __iomem *regs;
+	int irq;
+	int ret;
+
+	dev_dbg(&pdev->dev, "ci13xxx_msm_probe\n");
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "failed to get platform resource mem\n");
+		return -ENXIO;
+	}
+
+	regs = ioremap(res->start, resource_size(res));
+	if (!regs) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		return -ENOMEM;
+	}
+
+	ret = udc_probe(&ci13xxx_msm_udc_driver, &pdev->dev, regs);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "udc_probe failed\n");
+		goto iounmap;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "IRQ not found\n");
+		ret = -ENXIO;
+		goto udc_remove;
+	}
+
+	ret = request_irq(irq, msm_udc_irq, IRQF_SHARED, pdev->name, pdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "request_irq failed\n");
+		goto udc_remove;
+	}
+
+	pm_runtime_no_callbacks(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	return 0;
+
+udc_remove:
+	udc_remove();
+iounmap:
+	iounmap(regs);
+
+	return ret;
+}
+
+static struct platform_driver ci13xxx_msm_driver = {
+	.probe = ci13xxx_msm_probe,
+	.driver = { .name = "msm_hsusb", },
+};
+MODULE_ALIAS("platform:msm_hsusb");
+
+static int __init ci13xxx_msm_init(void)
+{
+	return platform_driver_register(&ci13xxx_msm_driver);
+}
+module_init(ci13xxx_msm_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ci13xxx_pci.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ci13xxx_pci.c
new file mode 100644
index 0000000..883ab5e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ci13xxx_pci.c
@@ -0,0 +1,176 @@
+/*
+ * ci13xxx_pci.c - MIPS USB IP core family device controller
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ci13xxx_udc.c"
+
+/* driver name */
+#define UDC_DRIVER_NAME   "ci13xxx_pci"
+
+/******************************************************************************
+ * PCI block
+ *****************************************************************************/
+/**
+ * ci13xxx_pci_irq: interrupt handler
+ * @irq:  irq number
+ * @pdev: USB Device Controller interrupt source
+ *
+ * This function returns IRQ_HANDLED if the IRQ has been handled
+ * This is an ISR; don't trace it, use the attribute interface instead
+ */
+static irqreturn_t ci13xxx_pci_irq(int irq, void *pdev)
+{
+	if (irq == 0) {
+		dev_err(&((struct pci_dev *)pdev)->dev, "Invalid IRQ0 usage!");
+		return IRQ_HANDLED;
+	}
+	return udc_irq();
+}
+
+static struct ci13xxx_udc_driver ci13xxx_pci_udc_driver = {
+	.name		= UDC_DRIVER_NAME,
+};
+
+/**
+ * ci13xxx_pci_probe: PCI probe
+ * @pdev: USB device controller being probed
+ * @id:   PCI hotplug ID connecting controller to UDC framework
+ *
+ * This function returns an error code
+ * Allocates basic PCI resources for this USB device controller, and then
+ * invokes the udc_probe() method to start the UDC associated with it
+ */
+static int __devinit ci13xxx_pci_probe(struct pci_dev *pdev,
+				       const struct pci_device_id *id)
+{
+	void __iomem *regs = NULL;
+	int retval = 0;
+
+	if (id == NULL)
+		return -EINVAL;
+
+	retval = pci_enable_device(pdev);
+	if (retval)
+		goto done;
+
+	if (!pdev->irq) {
+		dev_err(&pdev->dev, "No IRQ, check BIOS/PCI setup!");
+		retval = -ENODEV;
+		goto disable_device;
+	}
+
+	retval = pci_request_regions(pdev, UDC_DRIVER_NAME);
+	if (retval)
+		goto disable_device;
+
+	/* BAR 0 holds all the registers */
+	regs = pci_iomap(pdev, 0, 0);
+	if (!regs) {
+		dev_err(&pdev->dev, "Error mapping memory!");
+		retval = -EFAULT;
+		goto release_regions;
+	}
+	pci_set_drvdata(pdev, (__force void *)regs);
+
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+
+	retval = udc_probe(&ci13xxx_pci_udc_driver, &pdev->dev, regs);
+	if (retval)
+		goto iounmap;
+
+	/* our device does not have MSI capability */
+
+	retval = request_irq(pdev->irq, ci13xxx_pci_irq, IRQF_SHARED,
+			     UDC_DRIVER_NAME, pdev);
+	if (retval)
+		goto gadget_remove;
+
+	return 0;
+
+ gadget_remove:
+	udc_remove();
+ iounmap:
+	pci_iounmap(pdev, regs);
+ release_regions:
+	pci_release_regions(pdev);
+ disable_device:
+	pci_disable_device(pdev);
+ done:
+	return retval;
+}
+
+/**
+ * ci13xxx_pci_remove: PCI remove
+ * @pdev: USB Device Controller being removed
+ *
+ * Reverses the effect of ci13xxx_pci_probe(),
+ * first invoking udc_remove() and then releasing
+ * all PCI resources allocated for this USB device controller
+ */
+static void __devexit ci13xxx_pci_remove(struct pci_dev *pdev)
+{
+	free_irq(pdev->irq, pdev);
+	udc_remove();
+	pci_iounmap(pdev, (__force void __iomem *)pci_get_drvdata(pdev));
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+/**
+ * PCI device table
+ * PCI device structure
+ *
+ * Check "pci.h" for details
+ */
+static DEFINE_PCI_DEVICE_TABLE(ci13xxx_pci_id_table) = {
+	{ PCI_DEVICE(0x153F, 0x1004) },
+	{ PCI_DEVICE(0x153F, 0x1006) },
+	{ 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, ci13xxx_pci_id_table);
+
+static struct pci_driver ci13xxx_pci_driver = {
+	.name         =	UDC_DRIVER_NAME,
+	.id_table     =	ci13xxx_pci_id_table,
+	.probe        =	ci13xxx_pci_probe,
+	.remove       =	__devexit_p(ci13xxx_pci_remove),
+};
+
+/**
+ * ci13xxx_pci_init: module init
+ *
+ * Driver load
+ */
+static int __init ci13xxx_pci_init(void)
+{
+	return pci_register_driver(&ci13xxx_pci_driver);
+}
+module_init(ci13xxx_pci_init);
+
+/**
+ * ci13xxx_pci_exit: module exit
+ *
+ * Driver unload
+ */
+static void __exit ci13xxx_pci_exit(void)
+{
+	pci_unregister_driver(&ci13xxx_pci_driver);
+}
+module_exit(ci13xxx_pci_exit);
+
+MODULE_AUTHOR("MIPS - David Lopo <dlopo@chipidea.mips.com>");
+MODULE_DESCRIPTION("MIPS CI13XXX USB Peripheral Controller");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("June 2008");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ci13xxx_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ci13xxx_udc.c
new file mode 100644
index 0000000..238372e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ci13xxx_udc.c
@@ -0,0 +1,2996 @@
+/*
+ * ci13xxx_udc.c - MIPS USB IP core family device controller
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Description: MIPS USB IP core family device controller
+ *              Currently it only supports IP part number CI13412
+ *
+ * This driver is composed of several blocks:
+ * - HW:     hardware interface
+ * - DBG:    debug facilities (optional)
+ * - UTIL:   utilities
+ * - ISR:    interrupts handling
+ * - ENDPT:  endpoint operations (Gadget API)
+ * - GADGET: gadget operations (Gadget API)
+ * - BUS:    bus glue code, bus abstraction layer
+ *
+ * Compile Options
+ * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities
+ * - STALL_IN:  non-empty bulk-in pipes cannot be halted
+ *              if defined mass storage compliance succeeds but with warnings
+ *              => case 4: Hi >  Dn
+ *              => case 5: Hi >  Di
+ *              => case 8: Hi <> Do
+ *              if undefined usbtest 13 fails
+ * - TRACE:     enable function tracing (depends on DEBUG)
+ *
+ * Main Features
+ * - Chapter 9 & Mass Storage Compliance with Gadget File Storage
+ * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined)
+ * - Normal & LPM support
+ *
+ * USBTEST Report
+ * - OK: 0-12, 13 (STALL_IN defined) & 14
+ * - Not Supported: 15 & 16 (ISO)
+ *
+ * TODO List
+ * - OTG
+ * - Isochronous & Interrupt Traffic
+ * - Handle requests which span several TDs
+ * - GET_STATUS(device) - always reports 0
+ * - Gadget API (majority of optional features)
+ * - Suspend & Remote Wakeup
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+
+#include "ci13xxx_udc.h"
+
+
+/******************************************************************************
+ * DEFINE
+ *****************************************************************************/
+
+#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+/* ctrl register bank access */
+static DEFINE_SPINLOCK(udc_lock);
+
+/* control endpoint description */
+static const struct usb_endpoint_descriptor
+ctrl_endpt_out_desc = {
+	.bLength         = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_OUT,
+	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+static const struct usb_endpoint_descriptor
+ctrl_endpt_in_desc = {
+	.bLength         = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+/* UDC descriptor */
+static struct ci13xxx *_udc;
+
+/* Interrupt statistics */
+#define ISR_MASK   0x1F
+static struct {
+	u32 test;
+	u32 ui;
+	u32 uei;
+	u32 pci;
+	u32 uri;
+	u32 sli;
+	u32 none;
+	struct {
+		u32 cnt;
+		u32 buf[ISR_MASK+1];
+		u32 idx;
+	} hndl;
+} isr_statistics;
+
+/**
+ * ffs_nr: find first (least significant) bit set
+ * @x: the word to search
+ *
+ * This function returns bit number (instead of position)
+ */
+static int ffs_nr(u32 x)
+{
+	int n = ffs(x);
+
+	return n ? n-1 : 32;
+}
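+
+/*
+ * Illustrative examples: ffs(0x08) is 4, so ffs_nr(0x08) returns bit
+ * number 3, and ffs_nr(0) returns 32 because ffs(0) is 0.  The result
+ * is typically used as a shift count, e.g. value >> ffs_nr(mask).
+ */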
+
+/******************************************************************************
+ * HW block
+ *****************************************************************************/
+/* register bank descriptor */
+static struct {
+	unsigned      lpm;    /* is LPM? */
+	void __iomem *abs;    /* bus map offset */
+	void __iomem *cap;    /* bus map offset + CAP offset + CAP data */
+	size_t        size;   /* bank size */
+} hw_bank;
+
+/* MSM specific */
+#define ABS_AHBBURST        (0x0090UL)
+#define ABS_AHBMODE         (0x0098UL)
+/* UDC register map */
+#define ABS_CAPLENGTH       (0x100UL)
+#define ABS_HCCPARAMS       (0x108UL)
+#define ABS_DCCPARAMS       (0x124UL)
+#define ABS_TESTMODE        (hw_bank.lpm ? 0x0FCUL : 0x138UL)
+/* offsets relative to CAPLENGTH (addr + data) */
+#define CAP_USBCMD          (0x000UL)
+#define CAP_USBSTS          (0x004UL)
+#define CAP_USBINTR         (0x008UL)
+#define CAP_DEVICEADDR      (0x014UL)
+#define CAP_ENDPTLISTADDR   (0x018UL)
+#define CAP_PORTSC          (0x044UL)
+#define CAP_DEVLC           (0x084UL)
+#define CAP_USBMODE         (hw_bank.lpm ? 0x0C8UL : 0x068UL)
+#define CAP_ENDPTSETUPSTAT  (hw_bank.lpm ? 0x0D8UL : 0x06CUL)
+#define CAP_ENDPTPRIME      (hw_bank.lpm ? 0x0DCUL : 0x070UL)
+#define CAP_ENDPTFLUSH      (hw_bank.lpm ? 0x0E0UL : 0x074UL)
+#define CAP_ENDPTSTAT       (hw_bank.lpm ? 0x0E4UL : 0x078UL)
+#define CAP_ENDPTCOMPLETE   (hw_bank.lpm ? 0x0E8UL : 0x07CUL)
+#define CAP_ENDPTCTRL       (hw_bank.lpm ? 0x0ECUL : 0x080UL)
+#define CAP_LAST            (hw_bank.lpm ? 0x12CUL : 0x0C0UL)
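+
+/*
+ * Note (illustrative): the CAP_* offsets above are resolved at run time
+ * from hw_bank.lpm, e.g. CAP_USBMODE is 0x0C8 on an LPM-capable core and
+ * 0x068 otherwise, so the same hw_cread()/hw_cwrite() accessors work for
+ * both register layouts.
+ */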
+
+/* maximum number of endpoints: valid only after hw_device_reset() */
+static unsigned hw_ep_max;
+
+/**
+ * hw_ep_bit: calculates the bit number
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns bit number
+ */
+static inline int hw_ep_bit(int num, int dir)
+{
+	return num + (dir ? 16 : 0);
+}
+
+static int ep_to_bit(int n)
+{
+	int fill = 16 - hw_ep_max / 2;
+
+	if (n >= hw_ep_max / 2)
+		n += fill;
+
+	return n;
+}
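+
+/*
+ * Worked example (illustrative, assuming hw_ep_max == 8, i.e. four
+ * RX/TX endpoint pairs): hw_ep_bit(2, 1) is 2 + 16 = 18, and ep_to_bit()
+ * adds fill = 16 - 8/2 = 12 to the upper half of the linear index, so
+ * index 5 maps to bit 17, matching the TX half of the hardware bitmaps.
+ */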
+
+/**
+ * hw_aread: reads from register bitfield
+ * @addr: address relative to bus map
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_aread(u32 addr, u32 mask)
+{
+	return ioread32(addr + hw_bank.abs) & mask;
+}
+
+/**
+ * hw_awrite: writes to register bitfield
+ * @addr: address relative to bus map
+ * @mask: bitfield mask
+ * @data: new data
+ */
+static void hw_awrite(u32 addr, u32 mask, u32 data)
+{
+	iowrite32(hw_aread(addr, ~mask) | (data & mask),
+		  addr + hw_bank.abs);
+}
+
+/**
+ * hw_cread: reads from register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_cread(u32 addr, u32 mask)
+{
+	return ioread32(addr + hw_bank.cap) & mask;
+}
+
+/**
+ * hw_cwrite: writes to register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ * @data: new data
+ */
+static void hw_cwrite(u32 addr, u32 mask, u32 data)
+{
+	iowrite32(hw_cread(addr, ~mask) | (data & mask),
+		  addr + hw_bank.cap);
+}
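+
+/*
+ * Illustrative read-modify-write: hw_cwrite(CAP_USBMODE, USBMODE_CM,
+ * USBMODE_CM_DEVICE) first reads the register masked with ~USBMODE_CM to
+ * preserve the untouched bits, then ORs in the new field, so only the
+ * CM bits change.
+ */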
+
+/**
+ * hw_ctest_and_clear: tests & clears register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_ctest_and_clear(u32 addr, u32 mask)
+{
+	u32 reg = hw_cread(addr, mask);
+
+	iowrite32(reg, addr + hw_bank.cap);
+	return reg;
+}
+
+/**
+ * hw_ctest_and_write: tests & writes register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ * @data: new data
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_ctest_and_write(u32 addr, u32 mask, u32 data)
+{
+	u32 reg = hw_cread(addr, ~0);
+
+	iowrite32((reg & ~mask) | (data & mask), addr + hw_bank.cap);
+	return (reg & mask) >> ffs_nr(mask);
+}
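+
+/*
+ * For illustration: with an 8-bit field mask such as 0x0000ff00,
+ * hw_ctest_and_write(addr, 0x0000ff00, data) replaces only that byte and
+ * returns the previous field value shifted down by ffs_nr(0x0000ff00) == 8,
+ * i.e. callers get the old field contents, not the raw register word.
+ */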
+
+static int hw_device_init(void __iomem *base)
+{
+	u32 reg;
+
+	/* bank is a module variable */
+	hw_bank.abs = base;
+
+	hw_bank.cap = hw_bank.abs;
+	hw_bank.cap += ABS_CAPLENGTH;
+	hw_bank.cap += ioread8(hw_bank.cap);
+
+	reg = hw_aread(ABS_HCCPARAMS, HCCPARAMS_LEN) >> ffs_nr(HCCPARAMS_LEN);
+	hw_bank.lpm  = reg;
+	hw_bank.size = hw_bank.cap - hw_bank.abs;
+	hw_bank.size += CAP_LAST;
+	hw_bank.size /= sizeof(u32);
+
+	reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
+	hw_ep_max = reg * 2;   /* cache hw ENDPT_MAX */
+
+	if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
+		return -ENODEV;
+
+	/* setup lock mode ? */
+
+	/* ENDPTSETUPSTAT is '0' by default */
+
+	/* HCSPARAMS.bf.ppc SHOULD BE zero for device */
+
+	return 0;
+}
+/**
+ * hw_device_reset: resets chip (execute without interruption)
+ * @base: register base address
+ *
+ * This function returns an error code
+ */
+static int hw_device_reset(struct ci13xxx *udc)
+{
+	/* should flush & stop before reset */
+	hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
+	hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
+
+	hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST);
+	while (hw_cread(CAP_USBCMD, USBCMD_RST))
+		udelay(10);             /* not RTOS friendly */
+
+
+	if (udc->udc_driver->notify_event)
+		udc->udc_driver->notify_event(udc,
+			CI13XXX_CONTROLLER_RESET_EVENT);
+
+	if (udc->udc_driver->flags & CI13XXX_DISABLE_STREAMING)
+		hw_cwrite(CAP_USBMODE, USBMODE_SDIS, USBMODE_SDIS);
+
+	/* USBMODE should be configured step by step */
+	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
+	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
+	hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);  /* HW >= 2.3 */
+
+	if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
+		pr_err("cannot enter in device mode");
+		pr_err("lpm = %i", hw_bank.lpm);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * hw_device_state: enables/disables interrupts & starts/stops device (execute
+ *                  without interruption)
+ * @dma: 0 => disable, !0 => enable and set dma engine
+ *
+ * This function returns an error code
+ */
+static int hw_device_state(u32 dma)
+{
+	if (dma) {
+		hw_cwrite(CAP_ENDPTLISTADDR, ~0, dma);
+		/* interrupt, error, port change, reset, sleep/suspend */
+		hw_cwrite(CAP_USBINTR, ~0,
+			     USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
+		hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);
+	} else {
+		hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
+		hw_cwrite(CAP_USBINTR, ~0, 0);
+	}
+	return 0;
+}
+
+/**
+ * hw_ep_flush: flush endpoint fifo (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns an error code
+ */
+static int hw_ep_flush(int num, int dir)
+{
+	int n = hw_ep_bit(num, dir);
+
+	do {
+		/* flush any pending transfer */
+		hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n));
+		while (hw_cread(CAP_ENDPTFLUSH, BIT(n)))
+			cpu_relax();
+	} while (hw_cread(CAP_ENDPTSTAT, BIT(n)));
+
+	return 0;
+}
+
+/**
+ * hw_ep_disable: disables endpoint (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns an error code
+ */
+static int hw_ep_disable(int num, int dir)
+{
+	hw_ep_flush(num, dir);
+	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32),
+		  dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
+	return 0;
+}
+
+/**
+ * hw_ep_enable: enables endpoint (execute without interruption)
+ * @num:  endpoint number
+ * @dir:  endpoint direction
+ * @type: endpoint type
+ *
+ * This function returns an error code
+ */
+static int hw_ep_enable(int num, int dir, int type)
+{
+	u32 mask, data;
+
+	if (dir) {
+		mask  = ENDPTCTRL_TXT;  /* type    */
+		data  = type << ffs_nr(mask);
+
+		mask |= ENDPTCTRL_TXS;  /* unstall */
+		mask |= ENDPTCTRL_TXR;  /* reset data toggle */
+		data |= ENDPTCTRL_TXR;
+		mask |= ENDPTCTRL_TXE;  /* enable  */
+		data |= ENDPTCTRL_TXE;
+	} else {
+		mask  = ENDPTCTRL_RXT;  /* type    */
+		data  = type << ffs_nr(mask);
+
+		mask |= ENDPTCTRL_RXS;  /* unstall */
+		mask |= ENDPTCTRL_RXR;  /* reset data toggle */
+		data |= ENDPTCTRL_RXR;
+		mask |= ENDPTCTRL_RXE;  /* enable  */
+		data |= ENDPTCTRL_RXE;
+	}
+	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
+	return 0;
+}
+
+/**
+ * hw_ep_get_halt: return endpoint halt status
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns 1 if endpoint halted
+ */
+static int hw_ep_get_halt(int num, int dir)
+{
+	u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+
+	return hw_cread(CAP_ENDPTCTRL + num * sizeof(u32), mask) ? 1 : 0;
+}
+
+/**
+ * hw_test_and_clear_setup_status: test & clear setup status (execute without
+ *                                 interruption)
+ * @n: endpoint number
+ *
+ * This function returns setup status
+ */
+static int hw_test_and_clear_setup_status(int n)
+{
+	n = ep_to_bit(n);
+	return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
+}
+
+/**
+ * hw_ep_prime: primes endpoint (execute without interruption)
+ * @num:     endpoint number
+ * @dir:     endpoint direction
+ * @is_ctrl: true if control endpoint
+ *
+ * This function returns an error code
+ */
+static int hw_ep_prime(int num, int dir, int is_ctrl)
+{
+	int n = hw_ep_bit(num, dir);
+
+	if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
+		return -EAGAIN;
+
+	hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
+
+	while (hw_cread(CAP_ENDPTPRIME, BIT(n)))
+		cpu_relax();
+	if (is_ctrl && dir == RX  && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
+		return -EAGAIN;
+
+	/* status should be tested according to the manual, but it doesn't work */
+	return 0;
+}
+
+/**
+ * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
+ *                 without interruption)
+ * @num:   endpoint number
+ * @dir:   endpoint direction
+ * @value: true => stall, false => unstall
+ *
+ * This function returns an error code
+ */
+static int hw_ep_set_halt(int num, int dir, int value)
+{
+	if (value != 0 && value != 1)
+		return -EINVAL;
+
+	do {
+		u32 addr = CAP_ENDPTCTRL + num * sizeof(u32);
+		u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+		u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
+
+		/* data toggle - reserved for EP0 but it's in ESS */
+		hw_cwrite(addr, mask_xs|mask_xr, value ? mask_xs : mask_xr);
+
+	} while (value != hw_ep_get_halt(num, dir));
+
+	return 0;
+}
+
+/**
+ * hw_intr_clear: disables interrupt & clears interrupt status (execute without
+ *                interruption)
+ * @n: interrupt bit
+ *
+ * This function returns an error code
+ */
+static int hw_intr_clear(int n)
+{
+	if (n >= REG_BITS)
+		return -EINVAL;
+
+	hw_cwrite(CAP_USBINTR, BIT(n), 0);
+	hw_cwrite(CAP_USBSTS,  BIT(n), BIT(n));
+	return 0;
+}
+
+/**
+ * hw_intr_force: enables interrupt & forces interrupt status (execute without
+ *                interruption)
+ * @n: interrupt bit
+ *
+ * This function returns an error code
+ */
+static int hw_intr_force(int n)
+{
+	if (n >= REG_BITS)
+		return -EINVAL;
+
+	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, TESTMODE_FORCE);
+	hw_cwrite(CAP_USBINTR,  BIT(n), BIT(n));
+	hw_cwrite(CAP_USBSTS,   BIT(n), BIT(n));
+	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, 0);
+	return 0;
+}
+
+/**
+ * hw_port_is_high_speed: test if the port is high speed
+ *
+ * This function returns true if the port is high speed
+ */
+static int hw_port_is_high_speed(void)
+{
+	return hw_bank.lpm ? hw_cread(CAP_DEVLC, DEVLC_PSPD) :
+		hw_cread(CAP_PORTSC, PORTSC_HSP);
+}
+
+/**
+ * hw_port_test_get: reads port test mode value
+ *
+ * This function returns port test mode value
+ */
+static u8 hw_port_test_get(void)
+{
+	return hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC);
+}
+
+/**
+ * hw_port_test_set: writes port test mode (execute without interruption)
+ * @mode: new value
+ *
+ * This function returns an error code
+ */
+static int hw_port_test_set(u8 mode)
+{
+	const u8 TEST_MODE_MAX = 7;
+
+	if (mode > TEST_MODE_MAX)
+		return -EINVAL;
+
+	hw_cwrite(CAP_PORTSC, PORTSC_PTC, mode << ffs_nr(PORTSC_PTC));
+	return 0;
+}
+
+/**
+ * hw_read_intr_enable: returns interrupt enable register
+ *
+ * This function returns register data
+ */
+static u32 hw_read_intr_enable(void)
+{
+	return hw_cread(CAP_USBINTR, ~0);
+}
+
+/**
+ * hw_read_intr_status: returns interrupt status register
+ *
+ * This function returns register data
+ */
+static u32 hw_read_intr_status(void)
+{
+	return hw_cread(CAP_USBSTS, ~0);
+}
+
+/**
+ * hw_register_read: reads all device registers (execute without interruption)
+ * @buf:  destination buffer
+ * @size: buffer size
+ *
+ * This function returns number of registers read
+ */
+static size_t hw_register_read(u32 *buf, size_t size)
+{
+	unsigned i;
+
+	if (size > hw_bank.size)
+		size = hw_bank.size;
+
+	for (i = 0; i < size; i++)
+		buf[i] = hw_aread(i * sizeof(u32), ~0);
+
+	return size;
+}
+
+/**
+ * hw_register_write: writes to register
+ * @addr: register address
+ * @data: register value
+ *
+ * This function returns an error code
+ */
+static int hw_register_write(u16 addr, u32 data)
+{
+	/* convert to a register index for the bounds check */
+	addr /= sizeof(u32);
+
+	if (addr >= hw_bank.size)
+		return -EINVAL;
+
+	/* convert back to a byte offset */
+	addr *= sizeof(u32);
+
+	hw_awrite(addr, ~0, data);
+	return 0;
+}
+
+/**
+ * hw_test_and_clear_complete: test & clear complete status (execute without
+ *                             interruption)
+ * @n: endpoint number
+ *
+ * This function returns complete status
+ */
+static int hw_test_and_clear_complete(int n)
+{
+	n = ep_to_bit(n);
+	return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n));
+}
+
+/**
+ * hw_test_and_clear_intr_active: test & clear active interrupts (execute
+ *                                without interruption)
+ *
+ * This function returns active interrupts
+ */
+static u32 hw_test_and_clear_intr_active(void)
+{
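+	/* only enabled interrupt sources count; writing them back to USBSTS
+	 * acknowledges (clears) them
+	 */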
+	u32 reg = hw_read_intr_status() & hw_read_intr_enable();
+
+	hw_cwrite(CAP_USBSTS, ~0, reg);
+	return reg;
+}
+
+/**
+ * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
+ *                                interruption)
+ *
+ * This function returns guard value
+ */
+static int hw_test_and_clear_setup_guard(void)
+{
+	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, 0);
+}
+
+/**
+ * hw_test_and_set_setup_guard: test & set setup guard (execute without
+ *                              interruption)
+ *
+ * This function returns guard value
+ */
+static int hw_test_and_set_setup_guard(void)
+{
+	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
+}
+
+/**
+ * hw_usb_set_address: configures USB address (execute without interruption)
+ * @value: new USB address
+ *
+ * This function returns an error code
+ */
+static int hw_usb_set_address(u8 value)
+{
+	/* advance */
+	hw_cwrite(CAP_DEVICEADDR, DEVICEADDR_USBADR | DEVICEADDR_USBADRA,
+		  value << ffs_nr(DEVICEADDR_USBADR) | DEVICEADDR_USBADRA);
+	return 0;
+}
+
+/**
+ * hw_usb_reset: restart device after a bus reset (execute without
+ *               interruption)
+ *
+ * This function returns an error code
+ */
+static int hw_usb_reset(void)
+{
+	hw_usb_set_address(0);
+
+	/* ESS flushes only at end?!? */
+	hw_cwrite(CAP_ENDPTFLUSH,    ~0, ~0);   /* flush all EPs */
+
+	/* clear setup token semaphores */
+	hw_cwrite(CAP_ENDPTSETUPSTAT, 0,  0);   /* writes its content */
+
+	/* clear complete status */
+	hw_cwrite(CAP_ENDPTCOMPLETE,  0,  0);   /* writes its content */
+
+	/* wait until all bits cleared */
+	while (hw_cread(CAP_ENDPTPRIME, ~0))
+		udelay(10);             /* not RTOS friendly */
+
+	/* reset all endpoints ? */
+
+	/* reset internal status and wait for further instructions
+	   no need to verify the port reset status (ESS does it) */
+
+	return 0;
+}
+
+/******************************************************************************
+ * DBG block
+ *****************************************************************************/
+/**
+ * show_device: prints information about device capabilities and status
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_device(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	struct usb_gadget *gadget = &udc->gadget;
+	int n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	n += scnprintf(buf + n, PAGE_SIZE - n, "speed             = %d\n",
+		       gadget->speed);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "max_speed         = %d\n",
+		       gadget->max_speed);
+	/* TODO: Scheduled for removal in 3.8. */
+	n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed      = %d\n",
+		       gadget_is_dualspeed(gadget));
+	n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg            = %d\n",
+		       gadget->is_otg);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral   = %d\n",
+		       gadget->is_a_peripheral);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "b_hnp_enable      = %d\n",
+		       gadget->b_hnp_enable);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "a_hnp_support     = %d\n",
+		       gadget->a_hnp_support);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "a_alt_hnp_support = %d\n",
+		       gadget->a_alt_hnp_support);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "name              = %s\n",
+		       (gadget->name ? gadget->name : ""));
+
+	return n;
+}
+static DEVICE_ATTR(device, S_IRUSR, show_device, NULL);
+
+/**
+ * show_driver: prints information about attached gadget (if any)
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	struct usb_gadget_driver *driver = udc->driver;
+	int n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	if (driver == NULL)
+		return scnprintf(buf, PAGE_SIZE,
+				 "There is no gadget attached!\n");
+
+	n += scnprintf(buf + n, PAGE_SIZE - n, "function  = %s\n",
+		       (driver->function ? driver->function : ""));
+	n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
+		       driver->max_speed);
+
+	return n;
+}
+static DEVICE_ATTR(driver, S_IRUSR, show_driver, NULL);
+
+/* Maximum event message length */
+#define DBG_DATA_MSG   64UL
+
+/* Maximum event messages */
+#define DBG_DATA_MAX   128UL
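+/* must stay a power of two: dbg_inc()/dbg_dec() wrap the ring index with
+ * "& (DBG_DATA_MAX - 1)"
+ */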
+
+/* Event buffer descriptor */
+static struct {
+	char     (buf[DBG_DATA_MAX])[DBG_DATA_MSG];   /* buffer */
+	unsigned idx;   /* index */
+	unsigned tty;   /* print to console? */
+	rwlock_t lck;   /* lock */
+} dbg_data = {
+	.idx = 0,
+	.tty = 0,
+	.lck = __RW_LOCK_UNLOCKED(dbg_data.lck)
+};
+
+/**
+ * dbg_dec: decrements debug event index
+ * @idx: buffer index
+ */
+static void dbg_dec(unsigned *idx)
+{
+	*idx = (*idx - 1) & (DBG_DATA_MAX-1);
+}
+
+/**
+ * dbg_inc: increments debug event index
+ * @idx: buffer index
+ */
+static void dbg_inc(unsigned *idx)
+{
+	*idx = (*idx + 1) & (DBG_DATA_MAX-1);
+}
+
+/**
+ * dbg_print:  prints the common part of the event
+ * @addr:   endpoint address
+ * @name:   event name
+ * @status: status
+ * @extra:  extra information
+ */
+static void dbg_print(u8 addr, const char *name, int status, const char *extra)
+{
+	struct timeval tval;
+	unsigned int stamp;
+	unsigned long flags;
+
+	write_lock_irqsave(&dbg_data.lck, flags);
+
+	do_gettimeofday(&tval);
+	stamp = tval.tv_sec & 0xFFFF;	/* 32-bit us timestamp, wraps ~every 4295 s */
+	stamp = stamp * 1000000 + tval.tv_usec;
+
+	scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
+		  "%04X\t? %02X %-7.7s %4i ?\t%s\n",
+		  stamp, addr, name, status, extra);
+
+	dbg_inc(&dbg_data.idx);
+
+	write_unlock_irqrestore(&dbg_data.lck, flags);
+
+	if (dbg_data.tty != 0)
+		pr_notice("%04X\t? %02X %-7.7s %4i ?\t%s\n",
+			  stamp, addr, name, status, extra);
+}
+
+/**
+ * dbg_done: prints a DONE event
+ * @addr:   endpoint address
+ * @token:  TD token (status & total bytes)
+ * @status: status
+ */
+static void dbg_done(u8 addr, const u32 token, int status)
+{
+	char msg[DBG_DATA_MSG];
+
+	scnprintf(msg, sizeof(msg), "%d %02X",
+		  (int)(token & TD_TOTAL_BYTES) >> ffs_nr(TD_TOTAL_BYTES),
+		  (int)(token & TD_STATUS)      >> ffs_nr(TD_STATUS));
+	dbg_print(addr, "DONE", status, msg);
+}
+
+/**
+ * dbg_event: prints a generic event
+ * @addr:   endpoint address
+ * @name:   event name
+ * @status: status
+ */
+static void dbg_event(u8 addr, const char *name, int status)
+{
+	if (name != NULL)
+		dbg_print(addr, name, status, "");
+}
+
+/**
+ * dbg_queue: prints a QUEUE event
+ * @addr:   endpoint address
+ * @req:    USB request
+ * @status: status
+ */
+static void dbg_queue(u8 addr, const struct usb_request *req, int status)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (req != NULL) {
+		scnprintf(msg, sizeof(msg),
+			  "%d %d", !req->no_interrupt, req->length);
+		dbg_print(addr, "QUEUE", status, msg);
+	}
+}
+
+/**
+ * dbg_setup: prints a SETUP event
+ * @addr: endpoint address
+ * @req:  setup request
+ */
+static void dbg_setup(u8 addr, const struct usb_ctrlrequest *req)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (req != NULL) {
+		scnprintf(msg, sizeof(msg),
+			  "%02X %02X %04X %04X %d", req->bRequestType,
+			  req->bRequest, le16_to_cpu(req->wValue),
+			  le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength));
+		dbg_print(addr, "SETUP", 0, msg);
+	}
+}
+
+/**
+ * show_events: displays the event buffer
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_events(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	unsigned long flags;
+	unsigned i, j, n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	read_lock_irqsave(&dbg_data.lck, flags);
+
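+	/* walk backwards from the newest entry to find how many of the most
+	 * recent messages fit in PAGE_SIZE, then print that window in
+	 * chronological order
+	 */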
+	i = dbg_data.idx;
+	for (dbg_dec(&i); i != dbg_data.idx; dbg_dec(&i)) {
+		n += strlen(dbg_data.buf[i]);
+		if (n >= PAGE_SIZE) {
+			n -= strlen(dbg_data.buf[i]);
+			break;
+		}
+	}
+	for (j = 0, dbg_inc(&i); j < n; dbg_inc(&i))
+		j += scnprintf(buf + j, PAGE_SIZE - j,
+			       "%s", dbg_data.buf[i]);
+
+	read_unlock_irqrestore(&dbg_data.lck, flags);
+
+	return n;
+}
+
+/**
+ * store_events: configures whether events are also printed to the console
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_events(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	unsigned tty;
+
+	dbg_trace("[%s] %p, %zu\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%u", &tty) != 1 || tty > 1) {
+		dev_err(dev, "<1|0>: enable|disable console log\n");
+		goto done;
+	}
+
+	dbg_data.tty = tty;
+	dev_info(dev, "tty = %u", dbg_data.tty);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(events, S_IRUSR | S_IWUSR, show_events, store_events);
+
+/**
+ * show_inters: shows interrupt status, enable status and history
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_inters(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	u32 intr;
+	unsigned i, j, n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+
+	n += scnprintf(buf + n, PAGE_SIZE - n,
+		       "status = %08x\n", hw_read_intr_status());
+	n += scnprintf(buf + n, PAGE_SIZE - n,
+		       "enable = %08x\n", hw_read_intr_enable());
+
+	n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n",
+		       isr_statistics.test);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? ui  = %d\n",
+		       isr_statistics.ui);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? uei = %d\n",
+		       isr_statistics.uei);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? pci = %d\n",
+		       isr_statistics.pci);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? uri = %d\n",
+		       isr_statistics.uri);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? sli = %d\n",
+		       isr_statistics.sli);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n",
+		       isr_statistics.none);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "*hndl = %d\n",
+		       isr_statistics.hndl.cnt);
+
+	for (i = isr_statistics.hndl.idx, j = 0; j <= ISR_MASK; j++, i++) {
+		i   &= ISR_MASK;
+		intr = isr_statistics.hndl.buf[i];
+
+		if (USBi_UI  & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "ui  ");
+		intr &= ~USBi_UI;
+		if (USBi_UEI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "uei ");
+		intr &= ~USBi_UEI;
+		if (USBi_PCI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "pci ");
+		intr &= ~USBi_PCI;
+		if (USBi_URI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "uri ");
+		intr &= ~USBi_URI;
+		if (USBi_SLI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "sli ");
+		intr &= ~USBi_SLI;
+		if (intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "??? ");
+		if (isr_statistics.hndl.buf[i])
+			n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+	}
+
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return n;
+}
+
+/**
+ * store_inters: enables & forces or disables an individual interrupt
+ *               (to be used for test purposes only)
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_inters(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned en, bit;
+
+	dbg_trace("[%s] %p, %zu\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%u %u", &en, &bit) != 2 || en > 1) {
+		dev_err(dev, "<1|0> <bit>: enable|disable interrupt");
+		goto done;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (en) {
+		if (hw_intr_force(bit))
+			dev_err(dev, "invalid bit number\n");
+		else
+			isr_statistics.test++;
+	} else {
+		if (hw_intr_clear(bit))
+			dev_err(dev, "invalid bit number\n");
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(inters, S_IRUSR | S_IWUSR, show_inters, store_inters);
+
+/**
+ * show_port_test: reads port test mode
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_port_test(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned mode;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	mode = hw_port_test_get();
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return scnprintf(buf, PAGE_SIZE, "mode = %u\n", mode);
+}
+
+/**
+ * store_port_test: writes port test mode
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_port_test(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned mode;
+
+	dbg_trace("[%s] %p, %zu\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%u", &mode) != 1) {
+		dev_err(dev, "<mode>: set port test mode");
+		goto done;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (hw_port_test_set(mode))
+		dev_err(dev, "invalid mode\n");
+	spin_unlock_irqrestore(udc->lock, flags);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(port_test, S_IRUSR | S_IWUSR,
+		   show_port_test, store_port_test);
+
+/**
+ * show_qheads: DMA contents of all queue heads
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned i, j, n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	for (i = 0; i < hw_ep_max/2; i++) {
+		struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
+		struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "EP=%02i: RX=%08X TX=%08X\n",
+			       i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
+		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       " %04X:    %08X    %08X\n", j,
+				       *((u32 *)mEpRx->qh.ptr + j),
+				       *((u32 *)mEpTx->qh.ptr + j));
+		}
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return n;
+}
+static DEVICE_ATTR(qheads, S_IRUSR, show_qheads, NULL);
+
+/**
+ * show_registers: dumps all registers
+ *
+ * Check "device.h" for details
+ */
+#define DUMP_ENTRIES	512
+static ssize_t show_registers(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	u32 *dump;
+	unsigned i, k, n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	dump = kmalloc(sizeof(u32) * DUMP_ENTRIES, GFP_KERNEL);
+	if (!dump) {
+		dev_err(dev, "%s: out of memory\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	k = hw_register_read(dump, DUMP_ENTRIES);
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	for (i = 0; i < k; i++) {
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "reg[0x%04X] = 0x%08X\n",
+			       i * (unsigned)sizeof(u32), dump[i]);
+	}
+	kfree(dump);
+
+	return n;
+}
+
+/**
+ * store_registers: writes value to register address
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_registers(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long addr, data, flags;
+
+	dbg_trace("[%s] %p, %zu\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%li %li", &addr, &data) != 2) {
+		dev_err(dev, "<addr> <data>: write data to register address");
+		goto done;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (hw_register_write(addr, data))
+		dev_err(dev, "invalid address range\n");
+	spin_unlock_irqrestore(udc->lock, flags);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(registers, S_IRUSR | S_IWUSR,
+		   show_registers, store_registers);
+
+/**
+ * show_requests: DMA contents of all requests currently queued (all endpts)
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	struct list_head   *ptr = NULL;
+	struct ci13xxx_req *req = NULL;
+	unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	for (i = 0; i < hw_ep_max; i++)
+		list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
+		{
+			req = list_entry(ptr, struct ci13xxx_req, queue);
+
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+					"EP=%02i: TD=%08X %s\n",
+					i % (hw_ep_max/2), (u32)req->dma,
+					((i < hw_ep_max/2) ? "RX" : "TX"));
+
+			for (j = 0; j < qSize; j++)
+				n += scnprintf(buf + n, PAGE_SIZE - n,
+						" %04X:    %08X\n", j,
+						*((u32 *)req->ptr + j));
+		}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return n;
+}
+static DEVICE_ATTR(requests, S_IRUSR, show_requests, NULL);
+
+/**
+ * dbg_create_files: initializes the attribute interface
+ * @dev: device
+ *
+ * This function returns an error code
+ */
+__maybe_unused static int dbg_create_files(struct device *dev)
+{
+	int retval = 0;
+
+	if (dev == NULL)
+		return -EINVAL;
+	retval = device_create_file(dev, &dev_attr_device);
+	if (retval)
+		goto done;
+	retval = device_create_file(dev, &dev_attr_driver);
+	if (retval)
+		goto rm_device;
+	retval = device_create_file(dev, &dev_attr_events);
+	if (retval)
+		goto rm_driver;
+	retval = device_create_file(dev, &dev_attr_inters);
+	if (retval)
+		goto rm_events;
+	retval = device_create_file(dev, &dev_attr_port_test);
+	if (retval)
+		goto rm_inters;
+	retval = device_create_file(dev, &dev_attr_qheads);
+	if (retval)
+		goto rm_port_test;
+	retval = device_create_file(dev, &dev_attr_registers);
+	if (retval)
+		goto rm_qheads;
+	retval = device_create_file(dev, &dev_attr_requests);
+	if (retval)
+		goto rm_registers;
+	return 0;
+
+ rm_registers:
+	device_remove_file(dev, &dev_attr_registers);
+ rm_qheads:
+	device_remove_file(dev, &dev_attr_qheads);
+ rm_port_test:
+	device_remove_file(dev, &dev_attr_port_test);
+ rm_inters:
+	device_remove_file(dev, &dev_attr_inters);
+ rm_events:
+	device_remove_file(dev, &dev_attr_events);
+ rm_driver:
+	device_remove_file(dev, &dev_attr_driver);
+ rm_device:
+	device_remove_file(dev, &dev_attr_device);
+ done:
+	return retval;
+}
+
+/**
+ * dbg_remove_files: destroys the attribute interface
+ * @dev: device
+ *
+ * This function returns an error code
+ */
+__maybe_unused static int dbg_remove_files(struct device *dev)
+{
+	if (dev == NULL)
+		return -EINVAL;
+	device_remove_file(dev, &dev_attr_requests);
+	device_remove_file(dev, &dev_attr_registers);
+	device_remove_file(dev, &dev_attr_qheads);
+	device_remove_file(dev, &dev_attr_port_test);
+	device_remove_file(dev, &dev_attr_inters);
+	device_remove_file(dev, &dev_attr_events);
+	device_remove_file(dev, &dev_attr_driver);
+	device_remove_file(dev, &dev_attr_device);
+	return 0;
+}
+
+/******************************************************************************
+ * UTIL block
+ *****************************************************************************/
+/**
+ * _usb_addr: calculates endpoint address from direction & number
+ * @ep:  endpoint
+ */
+static inline u8 _usb_addr(struct ci13xxx_ep *ep)
+{
+	return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
+}
+
+/**
+ * _hardware_enqueue: configures a request at hardware level
+ * @mEp:  endpoint
+ * @mReq: request
+ *
+ * This function returns an error code
+ */
+static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+{
+	unsigned i;
+	int ret = 0;
+	unsigned length = mReq->req.length;
+
+	trace("%p, %p", mEp, mReq);
+
+	/* don't queue twice */
+	if (mReq->req.status == -EALREADY)
+		return -EALREADY;
+
+	mReq->req.status = -EALREADY;
+	if (length && mReq->req.dma == DMA_ADDR_INVALID) {
+		mReq->req.dma = \
+			dma_map_single(mEp->device, mReq->req.buf,
+				       length, mEp->dir ? DMA_TO_DEVICE :
+				       DMA_FROM_DEVICE);
+		if (mReq->req.dma == 0)
+			return -ENOMEM;
+
+		mReq->map = 1;
+	}
+
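+	/* if req.zero is set and the length is an exact multiple of
+	 * maxpacket, append a separate zero-length TD (zptr) so the transfer
+	 * is terminated by a short packet
+	 */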
+	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
+		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
+					   &mReq->zdma);
+		if (mReq->zptr == NULL) {
+			if (mReq->map) {
+				dma_unmap_single(mEp->device, mReq->req.dma,
+					length, mEp->dir ? DMA_TO_DEVICE :
+					DMA_FROM_DEVICE);
+				mReq->req.dma = DMA_ADDR_INVALID;
+				mReq->map     = 0;
+			}
+			return -ENOMEM;
+		}
+		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
+		mReq->zptr->next    = TD_TERMINATE;
+		mReq->zptr->token   = TD_STATUS_ACTIVE;
+		if (!mReq->req.no_interrupt)
+			mReq->zptr->token   |= TD_IOC;
+	}
+	/*
+	 * TD configuration
+	 * TODO - handle requests which span several TDs
+	 */
+	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
+	mReq->ptr->token    = length << ffs_nr(TD_TOTAL_BYTES);
+	mReq->ptr->token   &= TD_TOTAL_BYTES;
+	mReq->ptr->token   |= TD_STATUS_ACTIVE;
+	if (mReq->zptr) {
+		mReq->ptr->next    = mReq->zdma;
+	} else {
+		mReq->ptr->next    = TD_TERMINATE;
+		if (!mReq->req.no_interrupt)
+			mReq->ptr->token  |= TD_IOC;
+	}
+	mReq->ptr->page[0]  = mReq->req.dma;
+	for (i = 1; i < 5; i++)
+		mReq->ptr->page[i] =
+			(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
+
+	if (!list_empty(&mEp->qh.queue)) {
+		struct ci13xxx_req *mReqPrev;
+		int n = hw_ep_bit(mEp->num, mEp->dir);
+		int tmp_stat;
+
+		mReqPrev = list_entry(mEp->qh.queue.prev,
+				struct ci13xxx_req, queue);
+		if (mReqPrev->zptr)
+			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
+		else
+			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
+		wmb();
+		if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
+			goto done;
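+		/* Add-dTD TripWire: set ATDTW, sample the endpoint status,
+		 * and repeat until ATDTW is still set afterwards, so the
+		 * sampled ENDPTSTAT value is known to be consistent
+		 */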
+		do {
+			hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
+			tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n));
+		} while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW));
+		hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0);
+		if (tmp_stat)
+			goto done;
+	}
+
+	/*  QH configuration */
+	mEp->qh.ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
+	mEp->qh.ptr->td.token &= ~TD_STATUS;   /* clear status */
+	mEp->qh.ptr->cap |=  QH_ZLT;
+
+	wmb();   /* synchronize before ep prime */
+
+	ret = hw_ep_prime(mEp->num, mEp->dir,
+			   mEp->type == USB_ENDPOINT_XFER_CONTROL);
+done:
+	return ret;
+}
+
+/**
+ * _hardware_dequeue: handles a request at hardware level
+ * @mEp:  endpoint
+ * @mReq: request
+ *
+ * This function returns an error code
+ */
+static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+{
+	trace("%p, %p", mEp, mReq);
+
+	if (mReq->req.status != -EALREADY)
+		return -EINVAL;
+
+	if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
+		return -EBUSY;
+
+	if (mReq->zptr) {
+		if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
+			return -EBUSY;
+		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
+		mReq->zptr = NULL;
+	}
+
+	mReq->req.status = 0;
+
+	if (mReq->map) {
+		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
+				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		mReq->req.dma = DMA_ADDR_INVALID;
+		mReq->map     = 0;
+	}
+
+	mReq->req.status = mReq->ptr->token & TD_STATUS;
+	if ((TD_STATUS_HALTED & mReq->req.status) != 0)
+		mReq->req.status = -1;
+	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
+		mReq->req.status = -1;
+	else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
+		mReq->req.status = -1;
+
+	mReq->req.actual   = mReq->ptr->token & TD_TOTAL_BYTES;
+	mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
+	mReq->req.actual   = mReq->req.length - mReq->req.actual;
+	mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;
+
+	return mReq->req.actual;
+}
+
+/**
+ * _ep_nuke: dequeues all endpoint requests
+ * @mEp: endpoint
+ *
+ * This function returns an error code
+ * Caller must hold lock
+ */
+static int _ep_nuke(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	trace("%p", mEp);
+
+	if (mEp == NULL)
+		return -EINVAL;
+
+	hw_ep_flush(mEp->num, mEp->dir);
+
+	while (!list_empty(&mEp->qh.queue)) {
+
+		/* pop oldest request */
+		struct ci13xxx_req *mReq = \
+			list_entry(mEp->qh.queue.next,
+				   struct ci13xxx_req, queue);
+		list_del_init(&mReq->queue);
+		mReq->req.status = -ESHUTDOWN;
+
+		if (mReq->req.complete != NULL) {
+			spin_unlock(mEp->lock);
+			mReq->req.complete(&mEp->ep, &mReq->req);
+			spin_lock(mEp->lock);
+		}
+	}
+	return 0;
+}
+
+/**
+ * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
+ * @gadget: gadget
+ *
+ * This function returns an error code
+ */
+static int _gadget_stop_activity(struct usb_gadget *gadget)
+{
+	struct usb_ep *ep;
+	struct ci13xxx    *udc = container_of(gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+
+	trace("%p", gadget);
+
+	if (gadget == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(udc->lock, flags);
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	udc->remote_wakeup = 0;
+	udc->suspended = 0;
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	/* flush all endpoints */
+	gadget_for_each_ep(ep, gadget) {
+		usb_ep_fifo_flush(ep);
+	}
+	usb_ep_fifo_flush(&udc->ep0out.ep);
+	usb_ep_fifo_flush(&udc->ep0in.ep);
+
+	udc->driver->disconnect(gadget);
+
+	/* make sure to disable all endpoints */
+	gadget_for_each_ep(ep, gadget) {
+		usb_ep_disable(ep);
+	}
+
+	if (udc->status != NULL) {
+		usb_ep_free_request(&udc->ep0in.ep, udc->status);
+		udc->status = NULL;
+	}
+
+	return 0;
+}
+
+/******************************************************************************
+ * ISR block
+ *****************************************************************************/
+/**
+ * isr_reset_handler: USB reset interrupt handler
+ * @udc: UDC device
+ *
+ * This function resets USB engine after a bus reset occurred
+ */
+static void isr_reset_handler(struct ci13xxx *udc)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+	int retval;
+
+	trace("%p", udc);
+
+	if (udc == NULL) {
+		err("EINVAL");
+		return;
+	}
+
+	dbg_event(0xFF, "BUS RST", 0);
+
+	spin_unlock(udc->lock);
+	retval = _gadget_stop_activity(&udc->gadget);
+	if (retval)
+		goto done;
+
+	retval = hw_usb_reset();
+	if (retval)
+		goto done;
+
+	udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_ATOMIC);
+	if (udc->status == NULL)
+		retval = -ENOMEM;
+
+	spin_lock(udc->lock);
+
+ done:
+	if (retval)
+		err("error: %i", retval);
+}
+
+/**
+ * isr_get_status_complete: get_status request complete function
+ * @ep:  endpoint
+ * @req: request handled
+ *
+ * Caller must release lock
+ */
+static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	trace("%p, %p", ep, req);
+
+	if (ep == NULL || req == NULL) {
+		err("EINVAL");
+		return;
+	}
+
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+/**
+ * isr_get_status_response: get_status request response
+ * @udc: udc struct
+ * @setup: setup request packet
+ *
+ * This function returns an error code
+ */
+static int isr_get_status_response(struct ci13xxx *udc,
+				   struct usb_ctrlrequest *setup)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	struct ci13xxx_ep *mEp = &udc->ep0in;
+	struct usb_request *req = NULL;
+	gfp_t gfp_flags = GFP_ATOMIC;
+	int dir, num, retval;
+
+	trace("%p, %p", mEp, setup);
+
+	if (mEp == NULL || setup == NULL)
+		return -EINVAL;
+
+	spin_unlock(mEp->lock);
+	req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
+	spin_lock(mEp->lock);
+	if (req == NULL)
+		return -ENOMEM;
+
+	req->complete = isr_get_status_complete;
+	req->length   = 2;
+	req->buf      = kzalloc(req->length, gfp_flags);
+	if (req->buf == NULL) {
+		retval = -ENOMEM;
+		goto err_free_req;
+	}
+
+	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+		/* Assume that device is bus powered for now. */
+		*((u16 *)req->buf) = _udc->remote_wakeup << 1;
+		retval = 0;
+	} else if ((setup->bRequestType & USB_RECIP_MASK) \
+		   == USB_RECIP_ENDPOINT) {
+		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
+			TX : RX;
+		num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+		*((u16 *)req->buf) = hw_ep_get_halt(num, dir);
+	}
+	/* else do nothing; reserved for future use */
+
+	spin_unlock(mEp->lock);
+	retval = usb_ep_queue(&mEp->ep, req, gfp_flags);
+	spin_lock(mEp->lock);
+	if (retval)
+		goto err_free_buf;
+
+	return 0;
+
+ err_free_buf:
+	kfree(req->buf);
+ err_free_req:
+	spin_unlock(mEp->lock);
+	usb_ep_free_request(&mEp->ep, req);
+	spin_lock(mEp->lock);
+	return retval;
+}
+
+/**
+ * isr_setup_status_complete: setup_status request complete function
+ * @ep:  endpoint
+ * @req: request handled
+ *
+ * Caller must release lock. Put the port in test mode if test mode
+ * feature is selected.
+ */
+static void
+isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ci13xxx *udc = req->context;
+	unsigned long flags;
+
+	trace("%p, %p", ep, req);
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (udc->test_mode)
+		hw_port_test_set(udc->test_mode);
+	spin_unlock_irqrestore(udc->lock, flags);
+}
+
+/**
+ * isr_setup_status_phase: queues the status phase of a setup transaction
+ * @udc: udc struct
+ *
+ * This function returns an error code
+ */
+static int isr_setup_status_phase(struct ci13xxx *udc)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	int retval;
+	struct ci13xxx_ep *mEp;
+
+	trace("%p", udc);
+
+	mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
+	udc->status->context = udc;
+	udc->status->complete = isr_setup_status_complete;
+
+	spin_unlock(mEp->lock);
+	retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
+	spin_lock(mEp->lock);
+
+	return retval;
+}
+
+/**
+ * isr_tr_complete_low: transaction complete low level handler
+ * @mEp: endpoint
+ *
+ * This function returns an error code
+ * Caller must hold lock
+ */
+static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	struct ci13xxx_req *mReq, *mReqTemp;
+	struct ci13xxx_ep *mEpTemp = mEp;
+	int uninitialized_var(retval);
+
+	trace("%p", mEp);
+
+	if (list_empty(&mEp->qh.queue))
+		return -EINVAL;
+
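+	/* complete finished requests in order; _hardware_dequeue() returns
+	 * -EBUSY at the first TD that is still active, which ends the walk
+	 */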
+	list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
+			queue) {
+		retval = _hardware_dequeue(mEp, mReq);
+		if (retval < 0)
+			break;
+		list_del_init(&mReq->queue);
+		dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
+		if (mReq->req.complete != NULL) {
+			spin_unlock(mEp->lock);
+			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
+					mReq->req.length)
+				mEpTemp = &_udc->ep0in;
+			mReq->req.complete(&mEpTemp->ep, &mReq->req);
+			spin_lock(mEp->lock);
+		}
+	}
+
+	if (retval == -EBUSY)
+		retval = 0;
+	if (retval < 0)
+		dbg_event(_usb_addr(mEp), "DONE", retval);
+
+	return retval;
+}
+
+/**
+ * isr_tr_complete_handler: transaction complete interrupt handler
+ * @udc: UDC descriptor
+ *
+ * This function handles traffic events
+ */
+static void isr_tr_complete_handler(struct ci13xxx *udc)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+	unsigned i;
+	u8 tmode = 0;
+
+	trace("%p", udc);
+
+	if (udc == NULL) {
+		err("EINVAL");
+		return;
+	}
+
+	for (i = 0; i < hw_ep_max; i++) {
+		struct ci13xxx_ep *mEp  = &udc->ci13xxx_ep[i];
+		int type, num, dir, err = -EINVAL;
+		struct usb_ctrlrequest req;
+
+		if (mEp->desc == NULL)
+			continue;   /* not configured */
+
+		if (hw_test_and_clear_complete(i)) {
+			err = isr_tr_complete_low(mEp);
+			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+				if (err > 0)   /* needs status phase */
+					err = isr_setup_status_phase(udc);
+				if (err < 0) {
+					dbg_event(_usb_addr(mEp),
+						  "ERROR", err);
+					spin_unlock(udc->lock);
+					if (usb_ep_set_halt(&mEp->ep))
+						err("error: ep_set_halt");
+					spin_lock(udc->lock);
+				}
+			}
+		}
+
+		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
+		    !hw_test_and_clear_setup_status(i))
+			continue;
+
+		if (i != 0) {
+			warn("ctrl traffic received at endpoint");
+			continue;
+		}
+
+		/*
+		 * Flush data and handshake transactions of previous
+		 * setup packet.
+		 */
+		_ep_nuke(&udc->ep0out);
+		_ep_nuke(&udc->ep0in);
+
+		/* read the setup packet under the SUTW guard; retry the copy
+		 * if the controller cleared the guard because a new SETUP
+		 * packet arrived in the meantime */
+		do {
+			hw_test_and_set_setup_guard();
+			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
+		} while (!hw_test_and_clear_setup_guard());
+
+		type = req.bRequestType;
+
+		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
+
+		dbg_setup(_usb_addr(mEp), &req);
+
+		switch (req.bRequest) {
+		case USB_REQ_CLEAR_FEATURE:
+			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+					le16_to_cpu(req.wValue) ==
+					USB_ENDPOINT_HALT) {
+				if (req.wLength != 0)
+					break;
+				num  = le16_to_cpu(req.wIndex);
+				dir = num & USB_ENDPOINT_DIR_MASK;
+				num &= USB_ENDPOINT_NUMBER_MASK;
+				if (dir) /* TX */
+					num += hw_ep_max/2;
+				if (!udc->ci13xxx_ep[num].wedge) {
+					spin_unlock(udc->lock);
+					err = usb_ep_clear_halt(
+						&udc->ci13xxx_ep[num].ep);
+					spin_lock(udc->lock);
+					if (err)
+						break;
+				}
+				err = isr_setup_status_phase(udc);
+			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
+					le16_to_cpu(req.wValue) ==
+					USB_DEVICE_REMOTE_WAKEUP) {
+				if (req.wLength != 0)
+					break;
+				udc->remote_wakeup = 0;
+				err = isr_setup_status_phase(udc);
+			} else {
+				goto delegate;
+			}
+			break;
+		case USB_REQ_GET_STATUS:
+			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
+			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
+			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
+				goto delegate;
+			if (le16_to_cpu(req.wLength) != 2 ||
+			    le16_to_cpu(req.wValue)  != 0)
+				break;
+			err = isr_get_status_response(udc, &req);
+			break;
+		case USB_REQ_SET_ADDRESS:
+			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
+				goto delegate;
+			if (le16_to_cpu(req.wLength) != 0 ||
+			    le16_to_cpu(req.wIndex)  != 0)
+				break;
+			err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
+			if (err)
+				break;
+			err = isr_setup_status_phase(udc);
+			break;
+		case USB_REQ_SET_FEATURE:
+			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+					le16_to_cpu(req.wValue) ==
+					USB_ENDPOINT_HALT) {
+				if (req.wLength != 0)
+					break;
+				num  = le16_to_cpu(req.wIndex);
+				dir = num & USB_ENDPOINT_DIR_MASK;
+				num &= USB_ENDPOINT_NUMBER_MASK;
+				if (dir) /* TX */
+					num += hw_ep_max/2;
+
+				spin_unlock(udc->lock);
+				err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
+				spin_lock(udc->lock);
+				if (!err)
+					isr_setup_status_phase(udc);
+			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
+				if (req.wLength != 0)
+					break;
+				switch (le16_to_cpu(req.wValue)) {
+				case USB_DEVICE_REMOTE_WAKEUP:
+					udc->remote_wakeup = 1;
+					err = isr_setup_status_phase(udc);
+					break;
+				case USB_DEVICE_TEST_MODE:
+					tmode = le16_to_cpu(req.wIndex) >> 8;
+					switch (tmode) {
+					case TEST_J:
+					case TEST_K:
+					case TEST_SE0_NAK:
+					case TEST_PACKET:
+					case TEST_FORCE_EN:
+						udc->test_mode = tmode;
+						err = isr_setup_status_phase(
+								udc);
+						break;
+					default:
+						break;
+					}
+				default:
+					goto delegate;
+				}
+			} else {
+				goto delegate;
+			}
+			break;
+		default:
+delegate:
+			if (req.wLength == 0)   /* no data phase */
+				udc->ep0_dir = TX;
+
+			spin_unlock(udc->lock);
+			err = udc->driver->setup(&udc->gadget, &req);
+			spin_lock(udc->lock);
+			break;
+		}
+
+		if (err < 0) {
+			dbg_event(_usb_addr(mEp), "ERROR", err);
+
+			spin_unlock(udc->lock);
+			if (usb_ep_set_halt(&mEp->ep))
+				err("error: ep_set_halt");
+			spin_lock(udc->lock);
+		}
+	}
+}
+
+/******************************************************************************
+ * ENDPT block
+ *****************************************************************************/
+/**
+ * ep_enable: configure endpoint, making it usable
+ *
+ * Check usb_ep_enable() at "usb_gadget.h" for details
+ */
+static int ep_enable(struct usb_ep *ep,
+		     const struct usb_endpoint_descriptor *desc)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	int retval = 0;
+	unsigned long flags;
+
+	trace("%p, %p", ep, desc);
+
+	if (ep == NULL || desc == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	/* only internal SW should enable ctrl endpts */
+
+	mEp->desc = desc;
+
+	if (!list_empty(&mEp->qh.queue))
+		warn("enabling a non-empty endpoint!");
+
+	mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
+	mEp->num  = usb_endpoint_num(desc);
+	mEp->type = usb_endpoint_type(desc);
+
+	mEp->ep.maxpacket = usb_endpoint_maxp(desc);
+
+	dbg_event(_usb_addr(mEp), "ENABLE", 0);
+
+	mEp->qh.ptr->cap = 0;
+
+	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+		mEp->qh.ptr->cap |=  QH_IOS;
+	else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
+		mEp->qh.ptr->cap &= ~QH_MULT;
+	else
+		mEp->qh.ptr->cap &= ~QH_ZLT;
+
+	mEp->qh.ptr->cap |=
+		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
+	mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */
+
+	/*
+	 * Enable endpoints in the HW other than ep0 as ep0
+	 * is always enabled
+	 */
+	if (mEp->num)
+		retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_disable: endpoint is no longer usable
+ *
+ * Check usb_ep_disable() at "usb_gadget.h" for details
+ */
+static int ep_disable(struct usb_ep *ep)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	int direction, retval = 0;
+	unsigned long flags;
+
+	trace("%p", ep);
+
+	if (ep == NULL)
+		return -EINVAL;
+	else if (mEp->desc == NULL)
+		return -EBUSY;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	/* only internal SW should disable ctrl endpts */
+
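+	/* a control endpoint covers both directions, so the loop below runs
+	 * once per direction; for other endpoint types it runs exactly once
+	 */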
+	direction = mEp->dir;
+	do {
+		dbg_event(_usb_addr(mEp), "DISABLE", 0);
+
+		retval |= _ep_nuke(mEp);
+		retval |= hw_ep_disable(mEp->num, mEp->dir);
+
+		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+			mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+	} while (mEp->dir != direction);
+
+	mEp->desc = NULL;
+	mEp->ep.desc = NULL;
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_alloc_request: allocate a request object to use with this endpoint
+ *
+ * Check usb_ep_alloc_request() at "usb_gadget.h" for details
+ */
+static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep, struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = NULL;
+
+	trace("%p, %i", ep, gfp_flags);
+
+	if (ep == NULL) {
+		err("EINVAL");
+		return NULL;
+	}
+
+	mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
+	if (mReq != NULL) {
+		INIT_LIST_HEAD(&mReq->queue);
+		mReq->req.dma = DMA_ADDR_INVALID;
+
+		mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
+					   &mReq->dma);
+		if (mReq->ptr == NULL) {
+			kfree(mReq);
+			mReq = NULL;
+		}
+	}
+
+	dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
+
+	return (mReq == NULL) ? NULL : &mReq->req;
+}
+
+/**
+ * ep_free_request: frees a request object
+ *
+ * Check usb_ep_free_request() at "usb_gadget.h" for details
+ */
+static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+	unsigned long flags;
+
+	trace("%p, %p", ep, req);
+
+	if (ep == NULL || req == NULL) {
+		err("EINVAL");
+		return;
+	} else if (!list_empty(&mReq->queue)) {
+		err("EBUSY");
+		return;
+	}
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	if (mReq->ptr)
+		dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
+	kfree(mReq);
+
+	dbg_event(_usb_addr(mEp), "FREE", 0);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+}
+
+/**
+ * ep_queue: queues (submits) an I/O request to an endpoint
+ *
+ * Check usb_ep_queue() at "usb_gadget.h" for details
+ */
+static int ep_queue(struct usb_ep *ep, struct usb_request *req,
+		    gfp_t __maybe_unused gfp_flags)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+	int retval = 0;
+	unsigned long flags;
+
+	trace("%p, %p, %X", ep, req, gfp_flags);
+
+	if (ep == NULL || req == NULL || mEp->desc == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+		if (req->length)
+			mEp = (_udc->ep0_dir == RX) ?
+				&_udc->ep0out : &_udc->ep0in;
+		if (!list_empty(&mEp->qh.queue)) {
+			_ep_nuke(mEp);
+			retval = -EOVERFLOW;
+			warn("endpoint ctrl %X nuked", _usb_addr(mEp));
+		}
+	}
+
+	/* first nuke then test link, e.g. a previous status stage may not have been sent */
+	if (!list_empty(&mReq->queue)) {
+		retval = -EBUSY;
+		err("request already in queue");
+		goto done;
+	}
+
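+	/* _hardware_enqueue() builds a single TD per request (multi-TD
+	 * support is still a TODO), so anything longer than four pages is
+	 * truncated
+	 */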
+	if (req->length > (4 * CI13XXX_PAGE_SIZE)) {
+		req->length = (4 * CI13XXX_PAGE_SIZE);
+		retval = -EMSGSIZE;
+		warn("request length truncated");
+	}
+
+	dbg_queue(_usb_addr(mEp), req, retval);
+
+	/* push request */
+	mReq->req.status = -EINPROGRESS;
+	mReq->req.actual = 0;
+
+	retval = _hardware_enqueue(mEp, mReq);
+
+	if (retval == -EALREADY) {
+		dbg_event(_usb_addr(mEp), "QUEUE", retval);
+		retval = 0;
+	}
+	if (!retval)
+		list_add_tail(&mReq->queue, &mEp->qh.queue);
+
+ done:
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
+ *
+ * Check usb_ep_dequeue() at "usb_gadget.h" for details
+ */
+static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+	unsigned long flags;
+
+	trace("%p, %p", ep, req);
+
+	if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
+		mEp->desc == NULL || list_empty(&mReq->queue) ||
+		list_empty(&mEp->qh.queue))
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
+
+	hw_ep_flush(mEp->num, mEp->dir);
+
+	/* pop request */
+	list_del_init(&mReq->queue);
+	if (mReq->map) {
+		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
+				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		mReq->req.dma = DMA_ADDR_INVALID;
+		mReq->map     = 0;
+	}
+	req->status = -ECONNRESET;
+
+	if (mReq->req.complete != NULL) {
+		spin_unlock(mEp->lock);
+		mReq->req.complete(&mEp->ep, &mReq->req);
+		spin_lock(mEp->lock);
+	}
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return 0;
+}
+
+/**
+ * ep_set_halt: sets the endpoint halt feature
+ *
+ * Check usb_ep_set_halt() at "usb_gadget.h" for details
+ */
+static int ep_set_halt(struct usb_ep *ep, int value)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	int direction, retval = 0;
+	unsigned long flags;
+
+	trace("%p, %i", ep, value);
+
+	if (ep == NULL || mEp->desc == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+#ifndef STALL_IN
+	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
+	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
+	    !list_empty(&mEp->qh.queue)) {
+		spin_unlock_irqrestore(mEp->lock, flags);
+		return -EAGAIN;
+	}
+#endif
+
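+	/* control endpoints are (un)halted in both directions; the loop
+	 * toggles mEp->dir so each half is handled
+	 */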
+	direction = mEp->dir;
+	do {
+		dbg_event(_usb_addr(mEp), "HALT", value);
+		retval |= hw_ep_set_halt(mEp->num, mEp->dir, value);
+
+		if (!value)
+			mEp->wedge = 0;
+
+		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+			mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+	} while (mEp->dir != direction);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_set_wedge: sets the halt feature and ignores clear requests
+ *
+ * Check usb_ep_set_wedge() at "usb_gadget.h" for details
+ */
+static int ep_set_wedge(struct usb_ep *ep)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	unsigned long flags;
+
+	trace("%p", ep);
+
+	if (ep == NULL || mEp->desc == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	dbg_event(_usb_addr(mEp), "WEDGE", 0);
+	mEp->wedge = 1;
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+
+	return usb_ep_set_halt(ep);
+}
+
+/**
+ * ep_fifo_flush: flushes contents of a fifo
+ *
+ * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
+ */
+static void ep_fifo_flush(struct usb_ep *ep)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	unsigned long flags;
+
+	trace("%p", ep);
+
+	if (ep == NULL) {
+		err("%02X: -EINVAL", _usb_addr(mEp));
+		return;
+	}
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	dbg_event(_usb_addr(mEp), "FFLUSH", 0);
+	hw_ep_flush(mEp->num, mEp->dir);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+}
+
+/**
+ * Endpoint-specific part of the API to the USB controller hardware
+ * Check "usb_gadget.h" for details
+ */
+static const struct usb_ep_ops usb_ep_ops = {
+	.enable	       = ep_enable,
+	.disable       = ep_disable,
+	.alloc_request = ep_alloc_request,
+	.free_request  = ep_free_request,
+	.queue	       = ep_queue,
+	.dequeue       = ep_dequeue,
+	.set_halt      = ep_set_halt,
+	.set_wedge     = ep_set_wedge,
+	.fifo_flush    = ep_fifo_flush,
+};
+
+/******************************************************************************
+ * GADGET block
+ *****************************************************************************/
+static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+	int gadget_ready = 0;
+
+	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
+		return -EOPNOTSUPP;
+
+	spin_lock_irqsave(udc->lock, flags);
+	udc->vbus_active = is_active;
+	if (udc->driver)
+		gadget_ready = 1;
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	if (gadget_ready) {
+		if (is_active) {
+			pm_runtime_get_sync(&_gadget->dev);
+			hw_device_reset(udc);
+			hw_device_state(udc->ep0out.qh.dma);
+		} else {
+			hw_device_state(0);
+			if (udc->udc_driver->notify_event)
+				udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_STOPPED_EVENT);
+			_gadget_stop_activity(&udc->gadget);
+			pm_runtime_put_sync(&_gadget->dev);
+		}
+	}
+
+	return 0;
+}
+
+static int ci13xxx_wakeup(struct usb_gadget *_gadget)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+	int ret = 0;
+
+	trace();
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (!udc->remote_wakeup) {
+		ret = -EOPNOTSUPP;
+		trace("remote wakeup feature is not enabled\n");
+		goto out;
+	}
+	if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
+		ret = -EINVAL;
+		trace("port is not suspended\n");
+		goto out;
+	}
+	hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
+out:
+	spin_unlock_irqrestore(udc->lock, flags);
+	return ret;
+}
+
+static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+
+	if (udc->transceiver)
+		return usb_phy_set_power(udc->transceiver, mA);
+	return -ENOTSUPP;
+}
+
+static int ci13xxx_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *));
+static int ci13xxx_stop(struct usb_gadget_driver *driver);
+/**
+ * Device operations part of the API to the USB controller hardware,
+ * which don't involve endpoints (or i/o)
+ * Check  "usb_gadget.h" for details
+ */
+static const struct usb_gadget_ops usb_gadget_ops = {
+	.vbus_session	= ci13xxx_vbus_session,
+	.wakeup		= ci13xxx_wakeup,
+	.vbus_draw	= ci13xxx_vbus_draw,
+	.start		= ci13xxx_start,
+	.stop		= ci13xxx_stop,
+};
+
+/**
+ * ci13xxx_start: register a gadget driver
+ * @driver: the driver being registered
+ * @bind: the driver's bind callback
+ *
+ * Check ci13xxx_start() at <linux/usb/gadget.h> for details.
+ * Interrupts are enabled here.
+ */
+static int ci13xxx_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct ci13xxx *udc = _udc;
+	unsigned long flags;
+	int i, j;
+	int retval = -ENOMEM;
+
+	trace("%p", driver);
+
+	if (driver             == NULL ||
+	    bind               == NULL ||
+	    driver->setup      == NULL ||
+	    driver->disconnect == NULL)
+		return -EINVAL;
+	else if (udc         == NULL)
+		return -ENODEV;
+	else if (udc->driver != NULL)
+		return -EBUSY;
+
+	/* alloc resources */
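+	/* dQHs and dTDs are handed out 64-byte aligned and kept within a
+	 * controller page, per the pool parameters below
+	 */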
+	udc->qh_pool = dma_pool_create("ci13xxx_qh", &udc->gadget.dev,
+				       sizeof(struct ci13xxx_qh),
+				       64, CI13XXX_PAGE_SIZE);
+	if (udc->qh_pool == NULL)
+		return -ENOMEM;
+
+	udc->td_pool = dma_pool_create("ci13xxx_td", &udc->gadget.dev,
+				       sizeof(struct ci13xxx_td),
+				       64, CI13XXX_PAGE_SIZE);
+	if (udc->td_pool == NULL) {
+		dma_pool_destroy(udc->qh_pool);
+		udc->qh_pool = NULL;
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+
+	info("hw_ep_max = %d", hw_ep_max);
+
+	udc->gadget.dev.driver = NULL;
+
+	retval = 0;
+	for (i = 0; i < hw_ep_max/2; i++) {
+		for (j = RX; j <= TX; j++) {
+			int k = i + j * hw_ep_max/2;
+			struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
+
+			scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
+					(j == TX)  ? "in" : "out");
+
+			mEp->lock         = udc->lock;
+			mEp->device       = &udc->gadget.dev;
+			mEp->td_pool      = udc->td_pool;
+
+			mEp->ep.name      = mEp->name;
+			mEp->ep.ops       = &usb_ep_ops;
+			mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
+
+			INIT_LIST_HEAD(&mEp->qh.queue);
+			spin_unlock_irqrestore(udc->lock, flags);
+			mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
+					&mEp->qh.dma);
+			spin_lock_irqsave(udc->lock, flags);
+			if (mEp->qh.ptr == NULL)
+				retval = -ENOMEM;
+			else
+				memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
+
+			/* skip ep0 out and in endpoints */
+			if (i == 0)
+				continue;
+
+			list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
+		}
+	}
+	if (retval)
+		goto done;
+	spin_unlock_irqrestore(udc->lock, flags);
+	udc->ep0out.ep.desc = &ctrl_endpt_out_desc;
+	retval = usb_ep_enable(&udc->ep0out.ep);
+	if (retval)
+		return retval;
+
+	udc->ep0in.ep.desc = &ctrl_endpt_in_desc;
+	retval = usb_ep_enable(&udc->ep0in.ep);
+	if (retval)
+		return retval;
+	spin_lock_irqsave(udc->lock, flags);
+
+	udc->gadget.ep0 = &udc->ep0in.ep;
+	/* bind gadget */
+	driver->driver.bus     = NULL;
+	udc->gadget.dev.driver = &driver->driver;
+
+	spin_unlock_irqrestore(udc->lock, flags);
+	retval = bind(&udc->gadget);                /* MAY SLEEP */
+	spin_lock_irqsave(udc->lock, flags);
+
+	if (retval) {
+		udc->gadget.dev.driver = NULL;
+		goto done;
+	}
+
+	udc->driver = driver;
+	pm_runtime_get_sync(&udc->gadget.dev);
+	if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
+		if (udc->vbus_active) {
+			if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
+				hw_device_reset(udc);
+		} else {
+			pm_runtime_put_sync(&udc->gadget.dev);
+			goto done;
+		}
+	}
+
+	retval = hw_device_state(udc->ep0out.qh.dma);
+	if (retval)
+		pm_runtime_put_sync(&udc->gadget.dev);
+
+ done:
+	spin_unlock_irqrestore(udc->lock, flags);
+	return retval;
+}
+
+/**
+ * ci13xxx_stop: unregister a gadget driver
+ *
+ * Check usb_gadget_unregister_driver() at "usb_gadget.h" for details
+ */
+static int ci13xxx_stop(struct usb_gadget_driver *driver)
+{
+	struct ci13xxx *udc = _udc;
+	unsigned long i, flags;
+
+	trace("%p", driver);
+
+	if (driver             == NULL ||
+	    driver->unbind     == NULL ||
+	    driver->setup      == NULL ||
+	    driver->disconnect == NULL ||
+	    driver             != udc->driver)
+		return -EINVAL;
+
+	spin_lock_irqsave(udc->lock, flags);
+
+	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
+			udc->vbus_active) {
+		hw_device_state(0);
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+			CI13XXX_CONTROLLER_STOPPED_EVENT);
+		spin_unlock_irqrestore(udc->lock, flags);
+		_gadget_stop_activity(&udc->gadget);
+		spin_lock_irqsave(udc->lock, flags);
+		pm_runtime_put(&udc->gadget.dev);
+	}
+
+	/* unbind gadget */
+	spin_unlock_irqrestore(udc->lock, flags);
+	driver->unbind(&udc->gadget);               /* MAY SLEEP */
+	spin_lock_irqsave(udc->lock, flags);
+
+	udc->gadget.dev.driver = NULL;
+
+	/* free resources */
+	for (i = 0; i < hw_ep_max; i++) {
+		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+
+		if (!list_empty(&mEp->ep.ep_list))
+			list_del_init(&mEp->ep.ep_list);
+
+		if (mEp->qh.ptr != NULL)
+			dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
+	}
+
+	udc->gadget.ep0 = NULL;
+	udc->driver = NULL;
+
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	if (udc->td_pool != NULL) {
+		dma_pool_destroy(udc->td_pool);
+		udc->td_pool = NULL;
+	}
+	if (udc->qh_pool != NULL) {
+		dma_pool_destroy(udc->qh_pool);
+		udc->qh_pool = NULL;
+	}
+
+	return 0;
+}
+
+/******************************************************************************
+ * BUS block
+ *****************************************************************************/
+/**
+ * udc_irq: global interrupt handler
+ *
+ * This function returns IRQ_HANDLED if the IRQ has been handled
+ * It locks access to registers
+ */
+static irqreturn_t udc_irq(void)
+{
+	struct ci13xxx *udc = _udc;
+	irqreturn_t retval;
+	u32 intr;
+
+	trace();
+
+	if (udc == NULL) {
+		err("ENODEV");
+		return IRQ_HANDLED;
+	}
+
+	spin_lock(udc->lock);
+
+	if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
+		if (hw_cread(CAP_USBMODE, USBMODE_CM) !=
+				USBMODE_CM_DEVICE) {
+			spin_unlock(udc->lock);
+			return IRQ_NONE;
+		}
+	}
+	intr = hw_test_and_clear_intr_active();
+	if (intr) {
+		isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr;
+		isr_statistics.hndl.idx &= ISR_MASK;
+		isr_statistics.hndl.cnt++;
+
+		/* order defines priority - do NOT change it */
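+		/*
+		 * Reset (URI) is serviced first because it invalidates the
+		 * state that the port-change (PCI), transfer-completion (UI)
+		 * and suspend (SLI) handlers below would otherwise act on.
+		 */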
+		if (USBi_URI & intr) {
+			isr_statistics.uri++;
+			isr_reset_handler(udc);
+		}
+		if (USBi_PCI & intr) {
+			isr_statistics.pci++;
+			udc->gadget.speed = hw_port_is_high_speed() ?
+				USB_SPEED_HIGH : USB_SPEED_FULL;
+			if (udc->suspended && udc->driver->resume) {
+				spin_unlock(udc->lock);
+				udc->driver->resume(&udc->gadget);
+				spin_lock(udc->lock);
+				udc->suspended = 0;
+			}
+		}
+		if (USBi_UEI & intr)
+			isr_statistics.uei++;
+		if (USBi_UI  & intr) {
+			isr_statistics.ui++;
+			isr_tr_complete_handler(udc);
+		}
+		if (USBi_SLI & intr) {
+			if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
+			    udc->driver->suspend) {
+				udc->suspended = 1;
+				spin_unlock(udc->lock);
+				udc->driver->suspend(&udc->gadget);
+				spin_lock(udc->lock);
+			}
+			isr_statistics.sli++;
+		}
+		retval = IRQ_HANDLED;
+	} else {
+		isr_statistics.none++;
+		retval = IRQ_NONE;
+	}
+	spin_unlock(udc->lock);
+
+	return retval;
+}
+
+/**
+ * udc_release: driver release function
+ * @dev: device
+ *
+ * Currently does nothing
+ */
+static void udc_release(struct device *dev)
+{
+	trace("%p", dev);
+
+	if (dev == NULL)
+		err("EINVAL");
+}
+
+/**
+ * udc_probe: parent probe must call this to initialize UDC
+ * @dev:  parent device
+ * @regs: registers base address
+ * @name: driver name
+ *
+ * This function returns an error code
+ * No interrupts active, the IRQ has not been requested yet
+ * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask
+ */
+static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
+		void __iomem *regs)
+{
+	struct ci13xxx *udc;
+	int retval = 0;
+
+	if (dev == NULL || regs == NULL || driver == NULL ||
+			driver->name == NULL)
+		return -EINVAL;
+
+	trace("%p, %p, %p", dev, regs, driver->name);
+
+	udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
+	if (udc == NULL)
+		return -ENOMEM;
+
+	udc->lock = &udc_lock;
+	udc->regs = regs;
+	udc->udc_driver = driver;
+
+	udc->gadget.ops          = &usb_gadget_ops;
+	udc->gadget.speed        = USB_SPEED_UNKNOWN;
+	udc->gadget.max_speed    = USB_SPEED_HIGH;
+	udc->gadget.is_otg       = 0;
+	udc->gadget.name         = driver->name;
+
+	INIT_LIST_HEAD(&udc->gadget.ep_list);
+	udc->gadget.ep0 = NULL;
+
+	dev_set_name(&udc->gadget.dev, "gadget");
+	udc->gadget.dev.dma_mask = dev->dma_mask;
+	udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
+	udc->gadget.dev.parent   = dev;
+	udc->gadget.dev.release  = udc_release;
+
+	retval = hw_device_init(regs);
+	if (retval < 0)
+		goto free_udc;
+
+	udc->transceiver = usb_get_transceiver();
+
+	if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
+		if (udc->transceiver == NULL) {
+			retval = -ENODEV;
+			goto free_udc;
+		}
+	}
+
+	if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
+		retval = hw_device_reset(udc);
+		if (retval)
+			goto put_transceiver;
+	}
+
+	retval = device_register(&udc->gadget.dev);
+	if (retval) {
+		put_device(&udc->gadget.dev);
+		goto put_transceiver;
+	}
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	retval = dbg_create_files(&udc->gadget.dev);
+#endif
+	if (retval)
+		goto unreg_device;
+
+	if (udc->transceiver) {
+		retval = otg_set_peripheral(udc->transceiver->otg,
+						&udc->gadget);
+		if (retval)
+			goto remove_dbg;
+	}
+
+	retval = usb_add_gadget_udc(dev, &udc->gadget);
+	if (retval)
+		goto remove_trans;
+
+	pm_runtime_no_callbacks(&udc->gadget.dev);
+	pm_runtime_enable(&udc->gadget.dev);
+
+	_udc = udc;
+	return retval;
+
+remove_trans:
+	if (udc->transceiver) {
+		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
+		usb_put_transceiver(udc->transceiver);
+	}
+
+	err("error = %i", retval);
+remove_dbg:
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	dbg_remove_files(&udc->gadget.dev);
+#endif
+unreg_device:
+	device_unregister(&udc->gadget.dev);
+put_transceiver:
+	if (udc->transceiver)
+		usb_put_transceiver(udc->transceiver);
+free_udc:
+	kfree(udc);
+	_udc = NULL;
+	return retval;
+}
+
+/**
+ * udc_remove: parent remove must call this to remove UDC
+ *
+ * No interrupts active, the IRQ has been released
+ */
+static void udc_remove(void)
+{
+	struct ci13xxx *udc = _udc;
+
+	if (udc == NULL) {
+		err("EINVAL");
+		return;
+	}
+	usb_del_gadget_udc(&udc->gadget);
+
+	if (udc->transceiver) {
+		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
+		usb_put_transceiver(udc->transceiver);
+	}
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	dbg_remove_files(&udc->gadget.dev);
+#endif
+	device_unregister(&udc->gadget.dev);
+
+	kfree(udc);
+	_udc = NULL;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ci13xxx_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ci13xxx_udc.h
new file mode 100644
index 0000000..0d31af5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ci13xxx_udc.h
@@ -0,0 +1,227 @@
+/*
+ * ci13xxx_udc.h - structures, registers, and macros for the MIPS USB IP core
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Description: MIPS USB IP core family device controller
+ *              Structures, registers and logging macros
+ */
+
+#ifndef _CI13XXX_h_
+#define _CI13XXX_h_
+
+/******************************************************************************
+ * DEFINE
+ *****************************************************************************/
+#define CI13XXX_PAGE_SIZE  4096ul /* page size for TD's */
+#define ENDPT_MAX          (32)
+#define CTRL_PAYLOAD_MAX   (64)
+#define RX        (0)  /* similar to USB_DIR_OUT but can be used as an index */
+#define TX        (1)  /* similar to USB_DIR_IN  but can be used as an index */
+
+/******************************************************************************
+ * STRUCTURES
+ *****************************************************************************/
+/* DMA layout of transfer descriptors */
+struct ci13xxx_td {
+	/* 0 */
+	u32 next;
+#define TD_TERMINATE          BIT(0)
+#define TD_ADDR_MASK          (0xFFFFFFEUL << 5)
+	/* 1 */
+	u32 token;
+#define TD_STATUS             (0x00FFUL <<  0)
+#define TD_STATUS_TR_ERR      BIT(3)
+#define TD_STATUS_DT_ERR      BIT(5)
+#define TD_STATUS_HALTED      BIT(6)
+#define TD_STATUS_ACTIVE      BIT(7)
+#define TD_MULTO              (0x0003UL << 10)
+#define TD_IOC                BIT(15)
+#define TD_TOTAL_BYTES        (0x7FFFUL << 16)
+	/* 2 */
+	u32 page[5];
+#define TD_CURR_OFFSET        (0x0FFFUL <<  0)
+#define TD_FRAME_NUM          (0x07FFUL <<  0)
+#define TD_RESERVED_MASK      (0x0FFFUL <<  0)
+} __attribute__ ((packed));
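+
+/*
+ * Illustrative sketch (not part of the original driver): a dTD "token" for
+ * an active transfer of @len bytes that raises an interrupt on completion
+ * is composed from the masks above roughly as:
+ *
+ *	u32 token = TD_STATUS_ACTIVE | TD_IOC |
+ *		    ((len << __ffs(TD_TOTAL_BYTES)) & TD_TOTAL_BYTES);
+ *
+ * i.e. the byte count lives in bits 30:16 of the token word.
+ */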
+
+/* DMA layout of queue heads */
+struct ci13xxx_qh {
+	/* 0 */
+	u32 cap;
+#define QH_IOS                BIT(15)
+#define QH_MAX_PKT            (0x07FFUL << 16)
+#define QH_ZLT                BIT(29)
+#define QH_MULT               (0x0003UL << 30)
+	/* 1 */
+	u32 curr;
+	/* 2 - 8 */
+	struct ci13xxx_td        td;
+	/* 9 */
+	u32 RESERVED;
+	struct usb_ctrlrequest   setup;
+} __attribute__ ((packed));
+
+/* Extension of usb_request */
+struct ci13xxx_req {
+	struct usb_request   req;
+	unsigned             map;
+	struct list_head     queue;
+	struct ci13xxx_td   *ptr;
+	dma_addr_t           dma;
+	struct ci13xxx_td   *zptr;
+	dma_addr_t           zdma;
+};
+
+/* Extension of usb_ep */
+struct ci13xxx_ep {
+	struct usb_ep                          ep;
+	const struct usb_endpoint_descriptor  *desc;
+	u8                                     dir;
+	u8                                     num;
+	u8                                     type;
+	char                                   name[16];
+	struct {
+		struct list_head   queue;
+		struct ci13xxx_qh *ptr;
+		dma_addr_t         dma;
+	}                                      qh;
+	int                                    wedge;
+
+	/* global resources */
+	spinlock_t                            *lock;
+	struct device                         *device;
+	struct dma_pool                       *td_pool;
+};
+
+struct ci13xxx;
+struct ci13xxx_udc_driver {
+	const char	*name;
+	unsigned long	 flags;
+#define CI13XXX_REGS_SHARED		BIT(0)
+#define CI13XXX_REQUIRE_TRANSCEIVER	BIT(1)
+#define CI13XXX_PULLUP_ON_VBUS		BIT(2)
+#define CI13XXX_DISABLE_STREAMING	BIT(3)
+
+#define CI13XXX_CONTROLLER_RESET_EVENT		0
+#define CI13XXX_CONTROLLER_STOPPED_EVENT	1
+	void	(*notify_event) (struct ci13xxx *udc, unsigned event);
+};
+
+/* CI13XXX UDC descriptor & global resources */
+struct ci13xxx {
+	spinlock_t		  *lock;      /* ctrl register bank access */
+	void __iomem              *regs;      /* registers address space */
+
+	struct dma_pool           *qh_pool;   /* DMA pool for queue heads */
+	struct dma_pool           *td_pool;   /* DMA pool for transfer descs */
+	struct usb_request        *status;    /* ep0 status request */
+
+	struct usb_gadget          gadget;     /* USB slave device */
+	struct ci13xxx_ep          ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
+	u32                        ep0_dir;    /* ep0 direction */
+#define ep0out ci13xxx_ep[0]
+#define ep0in  ci13xxx_ep[hw_ep_max / 2]
+	u8                         remote_wakeup; /* Is remote wakeup feature
+							enabled by the host? */
+	u8                         suspended;  /* suspended by the host */
+	u8                         test_mode;  /* the selected test mode */
+
+	struct usb_gadget_driver  *driver;     /* 3rd party gadget driver */
+	struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
+	int                        vbus_active; /* is VBUS active */
+	struct usb_phy            *transceiver; /* Transceiver struct */
+};
+
+/******************************************************************************
+ * REGISTERS
+ *****************************************************************************/
+/* register size */
+#define REG_BITS   (32)
+
+/* HCCPARAMS */
+#define HCCPARAMS_LEN         BIT(17)
+
+/* DCCPARAMS */
+#define DCCPARAMS_DEN         (0x1F << 0)
+#define DCCPARAMS_DC          BIT(7)
+
+/* TESTMODE */
+#define TESTMODE_FORCE        BIT(0)
+
+/* USBCMD */
+#define USBCMD_RS             BIT(0)
+#define USBCMD_RST            BIT(1)
+#define USBCMD_SUTW           BIT(13)
+#define USBCMD_ATDTW          BIT(14)
+
+/* USBSTS & USBINTR */
+#define USBi_UI               BIT(0)
+#define USBi_UEI              BIT(1)
+#define USBi_PCI              BIT(2)
+#define USBi_URI              BIT(6)
+#define USBi_SLI              BIT(8)
+
+/* DEVICEADDR */
+#define DEVICEADDR_USBADRA    BIT(24)
+#define DEVICEADDR_USBADR     (0x7FUL << 25)
+
+/* PORTSC */
+#define PORTSC_FPR            BIT(6)
+#define PORTSC_SUSP           BIT(7)
+#define PORTSC_HSP            BIT(9)
+#define PORTSC_PTC            (0x0FUL << 16)
+
+/* DEVLC */
+#define DEVLC_PSPD            (0x03UL << 25)
+#define    DEVLC_PSPD_HS      (0x02UL << 25)
+
+/* USBMODE */
+#define USBMODE_CM            (0x03UL <<  0)
+#define    USBMODE_CM_IDLE    (0x00UL <<  0)
+#define    USBMODE_CM_DEVICE  (0x02UL <<  0)
+#define    USBMODE_CM_HOST    (0x03UL <<  0)
+#define USBMODE_SLOM          BIT(3)
+#define USBMODE_SDIS          BIT(4)
+
+/* ENDPTCTRL */
+#define ENDPTCTRL_RXS         BIT(0)
+#define ENDPTCTRL_RXT         (0x03UL <<  2)
+#define ENDPTCTRL_RXR         BIT(6)         /* reserved for port 0 */
+#define ENDPTCTRL_RXE         BIT(7)
+#define ENDPTCTRL_TXS         BIT(16)
+#define ENDPTCTRL_TXT         (0x03UL << 18)
+#define ENDPTCTRL_TXR         BIT(22)        /* reserved for port 0 */
+#define ENDPTCTRL_TXE         BIT(23)
+
+/******************************************************************************
+ * LOGGING
+ *****************************************************************************/
+#define ci13xxx_printk(level, format, args...) \
+do { \
+	if (_udc == NULL) \
+		printk(level "[%s] " format "\n", __func__, ## args); \
+	else \
+		dev_printk(level, _udc->gadget.dev.parent, \
+			   "[%s] " format "\n", __func__, ## args); \
+} while (0)
+
+#define err(format, args...)    ci13xxx_printk(KERN_ERR, format, ## args)
+#define warn(format, args...)   ci13xxx_printk(KERN_WARNING, format, ## args)
+#define info(format, args...)   ci13xxx_printk(KERN_INFO, format, ## args)
+
+#ifdef TRACE
+#define trace(format, args...)      ci13xxx_printk(KERN_DEBUG, format, ## args)
+#define dbg_trace(format, args...)  dev_dbg(dev, format, ##args)
+#else
+#define trace(format, args...)      do {} while (0)
+#define dbg_trace(format, args...)  do {} while (0)
+#endif
+
+#endif	/* _CI13XXX_h_ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/composite.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/composite.c
new file mode 100755
index 0000000..0dc9051
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/composite.c
@@ -0,0 +1,1760 @@
+/*
+ * composite.c - infrastructure for Composite USB Gadgets
+ *
+ * Copyright (C) 2006-2008 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+#pragma GCC optimize("O0")
+
+
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/utsname.h>
+
+#include <linux/usb/composite.h>
+#include <asm/unaligned.h>
+#include <mach/highspeed_debug.h>
+
+#if 1
+#ifndef DEBUG
+#define DEBUG
+#endif
+#ifndef CONFIG_DYNAMIC_DEBUG
+#define CONFIG_DYNAMIC_DEBUG
+#endif
+#endif
+/*
+ * The code in this file is utility code, used to build a gadget driver
+ * from one or more "function" drivers, one or more "configuration"
+ * objects, and a "usb_composite_driver" by gluing them together along
+ * with the relevant device-wide data.
+ */
+
+/* big enough to hold our biggest descriptor */
+#define USB_BUFSIZ	2048	/* was 1024 */
+
+static struct usb_composite_driver *composite;
+static int (*composite_gadget_bind)(struct usb_composite_dev *cdev);
+
+/* Some systems will need runtime overrides for the  product identifiers
+ * published in the device descriptor, either numbers or strings or both.
+ * String parameters are in UTF-8 (superset of ASCII's 7 bit characters).
+ */
+
+static ushort idVendor;
+module_param(idVendor, ushort, 0);
+MODULE_PARM_DESC(idVendor, "USB Vendor ID");
+
+static ushort idProduct;
+module_param(idProduct, ushort, 0);
+MODULE_PARM_DESC(idProduct, "USB Product ID");
+
+static ushort bcdDevice;
+module_param(bcdDevice, ushort, 0);
+MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
+
+static char *iManufacturer;
+module_param(iManufacturer, charp, 0);
+MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
+
+static char *iProduct;
+module_param(iProduct, charp, 0);
+MODULE_PARM_DESC(iProduct, "USB Product string");
+
+static char *iSerialNumber;
+module_param(iSerialNumber, charp, 0);
+MODULE_PARM_DESC(iSerialNumber, "SerialNumber string");
+
+static char *iConfiguration;
+module_param(iConfiguration, charp, 0);
+MODULE_PARM_DESC(iConfiguration, "USB Configuration string");
+static char composite_manufacturer[50];
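+
+/*
+ * Illustrative usage (not part of this file): since the identifiers above
+ * are module parameters, a composite-based gadget module can override them
+ * at load time, e.g. (the module name is only an example):
+ *
+ *	modprobe g_serial idVendor=0x1234 idProduct=0x5678 \
+ *		iManufacturer="Example Corp" iSerialNumber=0123456789
+ */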
+
+/*-------------------------------------------------------------------------*/
+/**
+ * next_ep_desc() - advance to the next EP descriptor
+ * @t: current pointer within descriptor array
+ *
+ * Return: next EP descriptor or NULL
+ *
+ * Iterate over @t until either EP descriptor found or
+ * NULL (that indicates end of list) encountered
+ */
+static struct usb_descriptor_header**
+next_ep_desc(struct usb_descriptor_header **t)
+{
+	for (; *t; t++) {
+		if ((*t)->bDescriptorType == USB_DT_ENDPOINT)
+			return t;
+	}
+	return NULL;
+}
+
+/*
+ * for_each_ep_desc()- iterate over endpoint descriptors in the
+ *		descriptors list
+ * @start:	pointer within descriptor array.
+ * @ep_desc:	endpoint descriptor to use as the loop cursor
+ */
+#define for_each_ep_desc(start, ep_desc) \
+	for (ep_desc = next_ep_desc(start); \
+	      ep_desc; ep_desc = next_ep_desc(ep_desc+1))
+
+/**
+ * config_ep_by_speed() - configures the given endpoint
+ * according to gadget speed.
+ * @g: pointer to the gadget
+ * @f: usb function
+ * @_ep: the endpoint to configure
+ *
+ * Return: error code, 0 on success
+ *
+ * This function chooses the right descriptors for a given
+ * endpoint according to gadget speed and saves it in the
+ * endpoint desc field. If the endpoint already has a descriptor
+ * assigned to it - overwrites it with currently corresponding
+ * descriptor. The endpoint maxpacket field is updated according
+ * to the chosen descriptor.
+ * Note: the supplied function should hold all the descriptors
+ * for supported speeds
+ */
+int config_ep_by_speed(struct usb_gadget *g,
+			struct usb_function *f,
+			struct usb_ep *_ep)
+{
+	struct usb_endpoint_descriptor *chosen_desc = NULL;
+	struct usb_descriptor_header **speed_desc = NULL;
+
+	struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
+	int want_comp_desc = 0;
+
+	struct usb_descriptor_header **d_spd; /* cursor for speed desc */
+
+	if (!g || !f || !_ep)
+		return -EIO;
+
+	/* select desired speed */
+	switch (g->speed) {
+	case USB_SPEED_SUPER:
+		if (gadget_is_superspeed(g)) {
+			speed_desc = f->ss_descriptors;
+			want_comp_desc = 1;
+			break;
+		}
+		/* else: fall through */
+	case USB_SPEED_HIGH:
+		if (gadget_is_dualspeed(g)) {
+			speed_desc = f->hs_descriptors;
+			break;
+		}
+		/* else: fall through */
+	default:
+		speed_desc = f->descriptors;
+	}
+	/* find descriptors */
+	for_each_ep_desc(speed_desc, d_spd) {
+		chosen_desc = (struct usb_endpoint_descriptor *)*d_spd;
+		if (chosen_desc->bEndpointAddress == _ep->address)
+			goto ep_found;
+	}
+	return -EIO;
+
+ep_found:
+	/* commit results */
+	_ep->maxpacket = usb_endpoint_maxp(chosen_desc);
+	_ep->desc = chosen_desc;
+	_ep->comp_desc = NULL;
+	_ep->maxburst = 0;
+	_ep->mult = 0;
+	if (!want_comp_desc)
+		return 0;
+
+	/*
+	 * Companion descriptor should follow EP descriptor
+	 * USB 3.0 spec, #9.6.7
+	 */
+	comp_desc = (struct usb_ss_ep_comp_descriptor *)*(++d_spd);
+	if (!comp_desc ||
+	    (comp_desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP))
+		return -EIO;
+	_ep->comp_desc = comp_desc;
+	if (g->speed == USB_SPEED_SUPER) {
+		switch (usb_endpoint_type(_ep->desc)) {
+		case USB_ENDPOINT_XFER_ISOC:
+			/* mult: bits 1:0 of bmAttributes */
+			_ep->mult = comp_desc->bmAttributes & 0x3;
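+			/* fall through - bMaxBurst applies to isoc eps too */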
+		case USB_ENDPOINT_XFER_BULK:
+		case USB_ENDPOINT_XFER_INT:
+			_ep->maxburst = comp_desc->bMaxBurst;
+			break;
+		default:
+			/* Do nothing for control endpoints */
+			break;
+		}
+	}
+	return 0;
+}
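+
+/*
+ * Illustrative sketch (not part of this file): a function driver typically
+ * calls config_ep_by_speed() from its set_alt() callback before enabling an
+ * endpoint, so that ep->desc matches the negotiated speed.  "my_in_ep" is a
+ * hypothetical endpoint pointer:
+ *
+ *	static int my_func_set_alt(struct usb_function *f,
+ *				   unsigned intf, unsigned alt)
+ *	{
+ *		struct usb_composite_dev *cdev = f->config->cdev;
+ *		int ret = config_ep_by_speed(cdev->gadget, f, my_in_ep);
+ *
+ *		if (ret)
+ *			return ret;
+ *		return usb_ep_enable(my_in_ep);
+ *	}
+ */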
+
+/**
+ * usb_add_function() - add a function to a configuration
+ * @config: the configuration
+ * @function: the function being added
+ * Context: single threaded during gadget setup
+ *
+ * After initialization, each configuration must have one or more
+ * functions added to it.  Adding a function involves calling its @bind()
+ * method to allocate resources such as interface and string identifiers
+ * and endpoints.
+ *
+ * This function returns the value of the function's bind(), which is
+ * zero for success else a negative errno value.
+ */
+int usb_add_function(struct usb_configuration *config,
+		struct usb_function *function)
+{
+	int	value = -EINVAL;
+
+	DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n",
+			function->name, function,
+			config->label, config);
+
+	if (!function->set_alt || !function->disable)
+		goto done;
+
+	function->config = config;
+	list_add_tail(&function->list, &config->functions);
+
+	/* REVISIT *require* function->bind? */
+	if (function->bind) {
+		value = function->bind(config, function);
+		if (value < 0) {
+			list_del(&function->list);
+			function->config = NULL;
+		}
+	} else
+		value = 0;
+
+	/* We allow configurations that don't work at both speeds.
+	 * If we run into a lowspeed Linux system, treat it the same
+	 * as full speed ... it's the function drivers that will need
+	 * to avoid bulk and ISO transfers.
+	 */
+	if (!config->fullspeed && function->descriptors)
+		config->fullspeed = true;
+	if (!config->highspeed && function->hs_descriptors)
+		config->highspeed = true;
+	if (!config->superspeed && function->ss_descriptors)
+		config->superspeed = true;
+
+done:
+	if (value)
+		DBG(config->cdev, "adding '%s'/%p --> %d\n",
+				function->name, function, value);
+	return value;
+}
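+
+/*
+ * Illustrative sketch (not part of this file): a configuration's bind
+ * callback usually instantiates its functions and attaches each one with
+ * usb_add_function().  f_acm_alloc() is a hypothetical helper returning a
+ * ready-to-bind struct usb_function:
+ *
+ *	static int my_config_bind(struct usb_configuration *c)
+ *	{
+ *		struct usb_function *f = f_acm_alloc();
+ *
+ *		if (IS_ERR(f))
+ *			return PTR_ERR(f);
+ *		return usb_add_function(c, f);
+ *	}
+ */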
+
+/**
+ * usb_function_deactivate - prevent function and gadget enumeration
+ * @function: the function that isn't yet ready to respond
+ *
+ * Blocks response of the gadget driver to host enumeration by
+ * preventing the data line pullup from being activated.  This is
+ * normally called during @bind() processing to change from the
+ * initial "ready to respond" state, or when a required resource
+ * becomes available.
+ *
+ * For example, drivers that serve as a passthrough to a userspace
+ * daemon can block enumeration unless that daemon (such as an OBEX,
+ * MTP, or print server) is ready to handle host requests.
+ *
+ * Not all systems support software control of their USB peripheral
+ * data pullups.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_function_deactivate(struct usb_function *function)
+{
+	struct usb_composite_dev	*cdev = function->config->cdev;
+	unsigned long			flags;
+	int				status = 0;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+
+	if (cdev->deactivations == 0)
+		status = usb_gadget_disconnect(cdev->gadget);
+	if (status == 0)
+		cdev->deactivations++;
+
+	spin_unlock_irqrestore(&cdev->lock, flags);
+	return status;
+}
+
+/**
+ * usb_function_activate - allow function and gadget enumeration
+ * @function: function on which usb_function_activate() was called
+ *
+ * Reverses effect of usb_function_deactivate().  If no more functions
+ * are delaying their activation, the gadget driver will respond to
+ * host enumeration procedures.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_function_activate(struct usb_function *function)
+{
+	struct usb_composite_dev	*cdev = function->config->cdev;
+	int				status = 0;
+
+	spin_lock(&cdev->lock);
+
+	if (WARN_ON(cdev->deactivations == 0))
+		status = -EINVAL;
+	else {
+		cdev->deactivations--;
+		if (cdev->deactivations == 0)
+			status = usb_gadget_connect(cdev->gadget);
+	}
+
+	spin_unlock(&cdev->lock);
+	return status;
+}
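+
+/*
+ * Illustrative sketch (not part of this file): a function that proxies to a
+ * userspace daemon can hold off enumeration in its bind() and release it
+ * once the daemon opens its device node ("my_func" is hypothetical):
+ *
+ *	usb_function_deactivate(&my_func);	- in bind(), daemon not ready
+ *	...
+ *	usb_function_activate(&my_func);	- once the daemon attaches
+ */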
+
+/**
+ * usb_interface_id() - allocate an unused interface ID
+ * @config: configuration associated with the interface
+ * @function: function handling the interface
+ * Context: single threaded during gadget setup
+ *
+ * usb_interface_id() is called from usb_function.bind() callbacks to
+ * allocate new interface IDs.  The function driver will then store that
+ * ID in interface, association, CDC union, and other descriptors.  It
+ * will also handle any control requests targeted at that interface,
+ * particularly changing its altsetting via set_alt().  There may
+ * also be class-specific or vendor-specific requests to handle.
+ *
+ * All interface identifiers should be allocated using this routine, to
+ * ensure that for example different functions don't wrongly assign
+ * different meanings to the same identifier.  Note that since interface
+ * identifiers are configuration-specific, functions used in more than
+ * one configuration (or more than once in a given configuration) need
+ * multiple versions of the relevant descriptors.
+ *
+ * Returns the interface ID which was allocated; or -ENODEV if no
+ * more interface IDs can be allocated.
+ */
+int usb_interface_id(struct usb_configuration *config,
+		struct usb_function *function)
+{
+	unsigned id = config->next_interface_id;
+
+	if (id < MAX_CONFIG_INTERFACES) {
+		config->interface[id] = function;
+		config->next_interface_id = id + 1;
+		return id;
+	}
+	return -ENODEV;
+}
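+
+/*
+ * Illustrative sketch (not part of this file): inside a function's bind()
+ * the allocated ID is stored in that function's interface descriptor
+ * ("my_intf_desc" is a hypothetical descriptor):
+ *
+ *	int id = usb_interface_id(c, f);
+ *	if (id < 0)
+ *		return id;
+ *	my_intf_desc.bInterfaceNumber = id;
+ */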
+
+static int config_buf(struct usb_configuration *config,
+		enum usb_device_speed speed, void *buf, u8 type)
+{
+	struct usb_config_descriptor	*c = buf;
+	void				*next = buf + USB_DT_CONFIG_SIZE;
+	int				len = USB_BUFSIZ - USB_DT_CONFIG_SIZE;
+	struct usb_function		*f;
+	int				status;
+
+	/* write the config descriptor */
+	c = buf;
+	c->bLength = USB_DT_CONFIG_SIZE;
+	c->bDescriptorType = type;
+	/* wTotalLength is written later */
+	c->bNumInterfaces = config->next_interface_id;
+	c->bConfigurationValue = config->bConfigurationValue;
+	c->iConfiguration = config->iConfiguration;
+	c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes;
+	c->bMaxPower = config->bMaxPower ? : (CONFIG_USB_GADGET_VBUS_DRAW / 2);
+
+	/* There may be e.g. OTG descriptors */
+	if (config->descriptors) {
+		status = usb_descriptor_fillbuf(next, len,
+				config->descriptors);
+		if (status < 0)
+			return status;
+		len -= status;
+		next += status;
+	}
+
+	/* add each function's descriptors */
+	list_for_each_entry(f, &config->functions, list) {
+		struct usb_descriptor_header **descriptors;
+
+		switch (speed) {
+		case USB_SPEED_SUPER:
+			descriptors = f->ss_descriptors;
+			break;
+		case USB_SPEED_HIGH:
+			descriptors = f->hs_descriptors;
+			break;
+		default:
+			descriptors = f->descriptors;
+		}
+
+		if (!descriptors)
+			continue;
+		status = usb_descriptor_fillbuf(next, len,
+			(const struct usb_descriptor_header **) descriptors);
+		if (status < 0)
+			return status;
+		len -= status;
+		next += status;
+	}
+
+	len = next - buf;
+	c->wTotalLength = cpu_to_le16(len);
+	return len;
+}
+
+static int config_desc(struct usb_composite_dev *cdev, unsigned w_value)
+{
+	struct usb_gadget		*gadget = cdev->gadget;
+	struct usb_configuration	*c;
+	u8				type = w_value >> 8;
+	enum usb_device_speed		speed = USB_SPEED_UNKNOWN;
+
+	if (gadget->speed == USB_SPEED_SUPER)
+		speed = gadget->speed;
+	else if (gadget_is_dualspeed(gadget)) {
+		int	hs = 0;
+		if (gadget->speed == USB_SPEED_HIGH)
+			hs = 1;
+		if (type == USB_DT_OTHER_SPEED_CONFIG)
+			hs = !hs;
+		if (hs)
+			speed = USB_SPEED_HIGH;
+
+	}
+
+	/* This is a lookup by config *INDEX* */
+	w_value &= 0xff;
+	if (list_empty(&cdev->configs))
+		USBSTACK_DBG("#### NO CONFIGS in THE LIST");
+	list_for_each_entry(c, &cdev->configs, list) {
+	/* ignore configs that won't work at this speed */
+#if 1 /* xjy test */
+		switch (speed) {
+		case USB_SPEED_SUPER:
+			if (!c->superspeed)
+				continue;
+			break;
+		case USB_SPEED_HIGH:
+			if (!c->highspeed)
+				continue;
+			break;
+		default:
+			if (!c->fullspeed)
+				continue;
+		}
+#endif
+		if (w_value == 0)
+			return config_buf(c, speed, cdev->req->buf, type);
+		w_value--;
+	}
+	return -EINVAL;
+}
+
+static int count_configs(struct usb_composite_dev *cdev, unsigned type)
+{
+	struct usb_gadget		*gadget = cdev->gadget;
+	struct usb_configuration	*c;
+	unsigned			count = 0;
+	int				hs = 0;
+	int				ss = 0;
+
+	if (gadget_is_dualspeed(gadget)) {
+		if (gadget->speed == USB_SPEED_HIGH)
+			hs = 1;
+		if (gadget->speed == USB_SPEED_SUPER)
+			ss = 1;
+		if (type == USB_DT_DEVICE_QUALIFIER)
+			hs = !hs;
+	}
+	list_for_each_entry(c, &cdev->configs, list) {
+		/* ignore configs that won't work at this speed */
+		if (ss) {
+			if (!c->superspeed)
+				continue;
+		} else if (hs) {
+			if (!c->highspeed)
+				continue;
+		} else {
+			if (!c->fullspeed)
+				continue;
+		}
+		count++;
+	}
+	return count;
+}
+
+/**
+ * bos_desc() - prepares the BOS descriptor.
+ * @cdev: pointer to usb_composite device to generate the bos
+ *	descriptor for
+ *
+ * This function generates the BOS (Binary Device Object)
+ * descriptor and its device capabilities descriptors. The BOS
+ * descriptor should be supported by a SuperSpeed device.
+ */
+static int bos_desc(struct usb_composite_dev *cdev)
+{
+	struct usb_ext_cap_descriptor	*usb_ext;
+	struct usb_ss_cap_descriptor	*ss_cap;
+	struct usb_dcd_config_params	dcd_config_params;
+	struct usb_bos_descriptor	*bos = cdev->req->buf;
+
+	bos->bLength = USB_DT_BOS_SIZE;
+	bos->bDescriptorType = USB_DT_BOS;
+
+	bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE);
+	bos->bNumDeviceCaps = 0;
+
+	/*
+	 * A SuperSpeed device shall include the USB2.0 extension descriptor
+	 * and shall support LPM when operating in USB2.0 HS mode.
+	 */
+	usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+	bos->bNumDeviceCaps++;
+	le16_add_cpu(&bos->wTotalLength, USB_DT_USB_EXT_CAP_SIZE);
+	usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
+	usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+	usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
+	usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);
+
+	/*
+	 * The Superspeed USB Capability descriptor shall be implemented by all
+	 * SuperSpeed devices.
+	 */
+	ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+	bos->bNumDeviceCaps++;
+	le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SS_CAP_SIZE);
+	ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE;
+	ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+	ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE;
+	ss_cap->bmAttributes = 0; /* LTM is not supported yet */
+	ss_cap->wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION |
+				USB_FULL_SPEED_OPERATION |
+				USB_HIGH_SPEED_OPERATION |
+				USB_5GBPS_OPERATION);
+	ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION;
+
+	/* Get Controller configuration */
+	if (cdev->gadget->ops->get_config_params)
+		cdev->gadget->ops->get_config_params(&dcd_config_params);
+	else {
+		dcd_config_params.bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT;
+		dcd_config_params.bU2DevExitLat =
+			cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT);
+	}
+	ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
+	ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
+
+	return le16_to_cpu(bos->wTotalLength);
+}
+
+static void device_qual(struct usb_composite_dev *cdev)
+{
+	struct usb_qualifier_descriptor	*qual = cdev->req->buf;
+
+	qual->bLength = sizeof(*qual);
+	qual->bDescriptorType = USB_DT_DEVICE_QUALIFIER;
+	/* POLICY: same bcdUSB and device type info at both speeds */
+	qual->bcdUSB = cdev->desc.bcdUSB;
+	qual->bDeviceClass = cdev->desc.bDeviceClass;
+	qual->bDeviceSubClass = cdev->desc.bDeviceSubClass;
+	qual->bDeviceProtocol = cdev->desc.bDeviceProtocol;
+	/* ASSUME same EP0 fifo size at both speeds */
+	qual->bMaxPacketSize0 = cdev->gadget->ep0->maxpacket;
+	qual->bNumConfigurations = count_configs(cdev, USB_DT_DEVICE_QUALIFIER);
+	qual->bRESERVED = 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void reset_config(struct usb_composite_dev *cdev)
+{
+	struct usb_function		*f;
+
+	DBG(cdev, "reset config\n");
+
+	list_for_each_entry(f, &cdev->config->functions, list) {
+		if (f->disable)
+			f->disable(f);
+
+		bitmap_zero(f->endpoints, 32);
+	}
+	cdev->config = NULL;
+	cdev->delayed_status = 0;
+}
+
+/* was static int set_config(); made non-static in this tree */
+int set_config(struct usb_composite_dev *cdev,
+		const struct usb_ctrlrequest *ctrl, unsigned number)
+{
+	struct usb_gadget	*gadget = cdev->gadget;
+	struct usb_configuration *c = NULL;
+	int			result = -EINVAL;
+	unsigned		power = gadget_is_otg(gadget) ? 8 : 100;
+	int			tmp;
+
+	if (number) {
+		list_for_each_entry(c, &cdev->configs, list) {
+			if (c->bConfigurationValue == number) {
+				/*
+				 * We disable the FDs of the previous
+				 * configuration only if the new configuration
+				 * is a valid one
+				 */
+				if (cdev->config)
+					reset_config(cdev);
+				result = 0;
+				break;
+			}
+		}
+		if (result < 0)
+			goto done;
+	} else { /* Zero configuration value - need to reset the config */
+		if (cdev->config)
+			reset_config(cdev);
+		result = 0;
+	}
+#if 0
+	INFO(cdev, "%s config #%d: %s\n",
+	     usb_speed_string(gadget->speed),
+	     number, c ? c->label : "unconfigured");
+#endif
+	if (!c)
+		goto done;
+
+	cdev->config = c;
+
+	/* Initialize all interfaces by setting them to altsetting zero. */
+	for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) {
+		struct usb_function	*f = c->interface[tmp];
+		struct usb_descriptor_header **descriptors;
+
+		if (!f)
+			break;
+
+		/*
+		 * Record which endpoints are used by the function. This is used
+		 * to dispatch control requests targeted at that endpoint to the
+		 * function's setup callback instead of the current
+		 * configuration's setup callback.
+		 */
+		switch (gadget->speed) {
+		case USB_SPEED_SUPER:
+			descriptors = f->ss_descriptors;
+			break;
+		case USB_SPEED_HIGH:
+			descriptors = f->hs_descriptors;
+			break;
+		default:
+			descriptors = f->descriptors;
+		}
+
+		for (; *descriptors; ++descriptors) {
+			struct usb_endpoint_descriptor *ep;
+			int addr;
+
+			if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT)
+				continue;
+
+			ep = (struct usb_endpoint_descriptor *)*descriptors;
+			addr = ((ep->bEndpointAddress & 0x80) >> 3)
+			     |  (ep->bEndpointAddress & 0x0f);
+			set_bit(addr, f->endpoints);
+		}
+
+		result = f->set_alt(f, tmp, 0);
+		if (result < 0) {
+			DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n",
+					tmp, f->name, f, result);
+
+			reset_config(cdev);
+			goto done;
+		}
+
+		if (result == USB_GADGET_DELAYED_STATUS) {
+			DBG(cdev,
+			 "%s: interface %d (%s) requested delayed status\n",
+					__func__, tmp, f->name);
+			cdev->delayed_status++;
+			DBG(cdev, "delayed_status count %d\n",
+					cdev->delayed_status);
+		}
+	}
+	cdev->suspended = 0;
+	/* when we return, be sure our power usage is valid */
+	power = c->bMaxPower ? (2 * c->bMaxPower) : CONFIG_USB_GADGET_VBUS_DRAW;
+done:
+	usb_gadget_vbus_draw(gadget, power);
+	if (result >= 0 && cdev->delayed_status)
+		result = USB_GADGET_DELAYED_STATUS;
+	return result;
+}
+
+/**
+ * usb_add_config() - add a configuration to a device.
+ * @cdev: wraps the USB gadget
+ * @config: the configuration, with bConfigurationValue assigned
+ * @bind: the configuration's bind function
+ * Context: single threaded during gadget setup
+ *
+ * One of the main tasks of a composite @bind() routine is to
+ * add each of the configurations it supports, using this routine.
+ *
+ * This function returns the value of the configuration's @bind(), which
+ * is zero for success else a negative errno value.  Binding configurations
+ * assigns global resources including string IDs, and per-configuration
+ * resources such as interface IDs and endpoints.
+ */
+int usb_add_config(struct usb_composite_dev *cdev,
+		struct usb_configuration *config,
+		int (*bind)(struct usb_configuration *))
+{
+	int				status = -EINVAL;
+	struct usb_configuration	*c;
+#if 0
+	DBG(cdev, "adding config #%u '%s'/%p\n",
+			config->bConfigurationValue,
+			config->label, config);
+#endif
+	if (!config->bConfigurationValue || !bind)
+		goto done;
+
+	/* Prevent duplicate configuration identifiers */
+	list_for_each_entry(c, &cdev->configs, list) {
+		if (c->bConfigurationValue == config->bConfigurationValue) {
+			status = -EBUSY;
+			USBSTACK_DBG("%s, %u, c_confval:%d, config_val:%d\n ",
+				__func__, __LINE__, c->bConfigurationValue,
+				config->bConfigurationValue);
+			goto done;
+		}
+	}
+
+	config->cdev = cdev;
+	list_add_tail(&config->list, &cdev->configs);
+	//USBSTACK_DBG("%s, add config0x%8x \n ", __func__, (u32)cdev);
+
+	INIT_LIST_HEAD(&config->functions);
+	config->next_interface_id = 0;
+	memset(config->interface, 0, sizeof(config->interface));
+
+	status = bind(config);
+	if (status < 0) {
+		list_del(&config->list);
+		config->cdev = NULL;
+	} else {
+		unsigned	i;
+#if 0
+		DBG(cdev, "cfg %d/%p speeds:%s%s%s\n",
+			config->bConfigurationValue, config,
+			config->superspeed ? " super" : "",
+			config->highspeed ? " high" : "",
+			config->fullspeed
+				? (gadget_is_dualspeed(cdev->gadget)
+					? " full"
+					: " full/low")
+				: "");
+
+#else
+	USBSTACK_DBG("cfg %d/%p speeds:%s%s%s",
+			config->bConfigurationValue, config,
+			config->superspeed ? " super" : "",
+			config->highspeed ? " high" : "",
+			config->fullspeed
+				? (gadget_is_dualspeed(cdev->gadget)
+					? " full"
+					: " full/low")
+				: "");
+
+#endif
+		for (i = 0; i < MAX_CONFIG_INTERFACES; i++) {
+			struct usb_function	*f = config->interface[i];
+
+			if (!f)
+				continue;
+			DBG(cdev, "  interface %d = %s/%p\n",
+				i, f->name, f);
+		}
+	}
+
+	/* set_alt(), or next bind(), sets up
+	 * ep->driver_data as needed.
+	 */
+	usb_ep_autoconfig_reset(cdev->gadget);
+
+done:
+	if (status) {
+		DBG(cdev, "added config '%s'/%u --> %d\n", config->label,
+				config->bConfigurationValue, status);
+		USBSTACK_DBG("added config '%s'/%u --> %d", config->label,
+				config->bConfigurationValue, status);
+	}
+	return status;
+}
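+
+/*
+ * Illustrative sketch (not part of this file): the composite driver's bind()
+ * adds each configuration it supports; "my_config" and my_config_bind() are
+ * hypothetical:
+ *
+ *	static struct usb_configuration my_config = {
+ *		.label			= "my config",
+ *		.bConfigurationValue	= 1,
+ *		.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+ *	};
+ *
+ *	static int my_bind(struct usb_composite_dev *cdev)
+ *	{
+ *		return usb_add_config(cdev, &my_config, my_config_bind);
+ *	}
+ */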
+
+static int unbind_config(struct usb_composite_dev *cdev,
+			      struct usb_configuration *config)
+{
+	while (!list_empty(&config->functions)) {
+		struct usb_function		*f;
+
+		f = list_first_entry(&config->functions,
+				struct usb_function, list);
+		list_del(&f->list);
+		if (f->unbind) {
+			DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
+			f->unbind(config, f);
+			/* may free memory for "f" */
+		}
+	}
+	if (config->unbind) {
+		DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
+		config->unbind(config);
+			/* may free memory for "c" */
+	}
+	return 0;
+}
+
+/**
+ * usb_remove_config() - remove a configuration from a device.
+ * @cdev: wraps the USB gadget
+ * @config: the configuration
+ *
+ * Drivers must call usb_gadget_disconnect before calling this function
+ * to disconnect the device from the host and make sure the host will not
+ * try to enumerate the device while we are changing the config list.
+ */
+int usb_remove_config(struct usb_composite_dev *cdev,
+		      struct usb_configuration *config)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+
+	if (cdev->config == config)
+		reset_config(cdev);
+
+	list_del(&config->list);
+
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	return unbind_config(cdev, config);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* We support strings in multiple languages ... string descriptor zero
+ * says which languages are supported.  The typical case will be that
+ * only one language (probably English) is used, with I18N handled on
+ * the host side.
+ */
+
+static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf)
+{
+	const struct usb_gadget_strings	*s;
+	u16				language;
+	__le16				*tmp;
+
+	while (*sp) {
+		s = *sp;
+		language = cpu_to_le16(s->language);
+		for (tmp = buf; *tmp && tmp < &buf[126]; tmp++) {
+			if (*tmp == language)
+				goto repeat;
+		}
+		*tmp++ = language;
+repeat:
+		sp++;
+	}
+}
+
+static int lookup_string(
+	struct usb_gadget_strings	**sp,
+	void				*buf,
+	u16				language,
+	int				id
+)
+{
+	struct usb_gadget_strings	*s;
+	int				value;
+
+	while (*sp) {
+		s = *sp++;
+		if (s->language != language)
+			continue;
+		value = usb_gadget_get_string(s, id, buf);
+		if (value > 0)
+			return value;
+	}
+	return -EINVAL;
+}
+
+static int get_string(struct usb_composite_dev *cdev,
+		void *buf, u16 language, int id)
+{
+	struct usb_configuration	*c;
+	struct usb_function		*f;
+	int				len;
+	const char			*str;
+
+	/* Yes, not only is USB's I18N support probably more than most
+	 * folk will ever care about ... also, it's all supported here.
+	 * (Except for UTF8 support for Unicode's "Astral Planes".)
+	 */
+
+	/* 0 == report all available language codes */
+	if (id == 0) {
+		struct usb_string_descriptor	*s = buf;
+		struct usb_gadget_strings	**sp;
+
+		memset(s, 0, 256);
+		s->bDescriptorType = USB_DT_STRING;
+
+		sp = composite->strings;
+		if (sp)
+			collect_langs(sp, s->wData);
+
+		list_for_each_entry(c, &cdev->configs, list) {
+			sp = c->strings;
+			if (sp)
+				collect_langs(sp, s->wData);
+
+			list_for_each_entry(f, &c->functions, list) {
+				sp = f->strings;
+				if (sp)
+					collect_langs(sp, s->wData);
+			}
+		}
+
+		for (len = 0; len <= 126 && s->wData[len]; len++)
+			continue;
+		if (!len)
+			return -EINVAL;
+
+		s->bLength = 2 * (len + 1);
+		return s->bLength;
+	}
+
+	/* Otherwise, look up and return a specified string.  First
+	 * check if the string has not been overridden.
+	 */
+	if (cdev->manufacturer_override == id)
+		str = iManufacturer ?: composite->iManufacturer ?:
+			composite_manufacturer;
+	else if (cdev->product_override == id)
+		str = iProduct ?: composite->iProduct;
+	else if (cdev->serial_override == id)
+		str = iSerialNumber;
+	else
+		str = NULL;
+	if (str) {
+		struct usb_gadget_strings strings = {
+			.language = language,
+			.strings  = &(struct usb_string) { 0xff, str }
+		};
+		return usb_gadget_get_string(&strings, 0xff, buf);
+	}
+
+	/* String IDs are device-scoped, so we look up each string
+	 * table we're told about.  These lookups are infrequent;
+	 * simpler-is-better here.
+	 */
+	if (composite->strings) {
+		len = lookup_string(composite->strings, buf, language, id);
+		if (len > 0)
+			return len;
+	}
+	list_for_each_entry(c, &cdev->configs, list) {
+		if (c->strings) {
+			len = lookup_string(c->strings, buf, language, id);
+			if (len > 0)
+				return len;
+		}
+		list_for_each_entry(f, &c->functions, list) {
+			if (!f->strings)
+				continue;
+			len = lookup_string(f->strings, buf, language, id);
+			if (len > 0)
+				return len;
+		}
+	}
+	return -EINVAL;
+}
+
+/**
+ * usb_string_id() - allocate an unused string ID
+ * @cdev: the device whose string descriptor IDs are being allocated
+ * Context: single threaded during gadget setup
+ *
+ * @usb_string_id() is called from bind() callbacks to allocate
+ * string IDs.  Drivers for functions, configurations, or gadgets will
+ * then store that ID in the appropriate descriptors and string table.
+ *
+ * All string identifiers should be allocated using this,
+ * @usb_string_ids_tab() or @usb_string_ids_n() routine, to ensure
+ * that for example different functions don't wrongly assign different
+ * meanings to the same identifier.
+ */
+int usb_string_id(struct usb_composite_dev *cdev)
+{
+	if (cdev->next_string_id < 254) {
+		/* string id 0 is reserved by USB spec for list of
+		 * supported languages */
+		/* 255 reserved as well? -- mina86 */
+		cdev->next_string_id++;
+		return cdev->next_string_id;
+	}
+	return -ENODEV;
+}
+
+/**
+ * usb_string_ids_tab() - allocate unused string IDs in batch
+ * @cdev: the device whose string descriptor IDs are being allocated
+ * @str: an array of usb_string objects to assign numbers to
+ * Context: single threaded during gadget setup
+ *
+ * @usb_string_ids_tab() is called from bind() callbacks to allocate
+ * string IDs.  Drivers for functions, configurations, or gadgets will
+ * then copy IDs from the string table to the appropriate descriptors
+ * and string table for other languages.
+ *
+ * All string identifiers should be allocated using this,
+ * @usb_string_id() or @usb_string_ids_n() routine, to ensure that for
+ * example different functions don't wrongly assign different meanings
+ * to the same identifier.
+ */
+int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
+{
+	int next = cdev->next_string_id;
+
+	for (; str->s; ++str) {
+		if (unlikely(next >= 254))
+			return -ENODEV;
+		str->id = ++next;
+	}
+
+	cdev->next_string_id = next;
+
+	return 0;
+}
+
+/**
+ * usb_string_ids_n() - allocate unused string IDs in batch
+ * @c: the device whose string descriptor IDs are being allocated
+ * @n: number of string IDs to allocate
+ * Context: single threaded during gadget setup
+ *
+ * Returns the first requested ID.  This ID and the next @n-1 IDs are
+ * now valid IDs.  This holds only when @n is non-zero; for @n == 0 the
+ * returned value is merely one past the last allocated ID and must not
+ * be used as a string ID.
+ *
+ * @usb_string_ids_n() is called from bind() callbacks to allocate
+ * string IDs.  Drivers for functions, configurations, or gadgets will
+ * then store that ID in the appropriate descriptors and string table.
+ *
+ * All string identifiers should be allocated using this,
+ * @usb_string_id() or @usb_string_ids_n() routine, to ensure that for
+ * example different functions don't wrongly assign different meanings
+ * to the same identifier.
+ */
+int usb_string_ids_n(struct usb_composite_dev *c, unsigned n)
+{
+	unsigned next = c->next_string_id;
+	if (unlikely(n > 254 || (unsigned)next + n > 254))
+		return -ENODEV;
+	c->next_string_id += n;
+	return next + 1;
+}
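+
+/*
+ * Illustrative sketch (not part of this file): allocating IDs for a whole
+ * string table in one call and wiring the result into a descriptor
+ * ("my_strings" and "my_intf_desc" are hypothetical):
+ *
+ *	static struct usb_string my_strings[] = {
+ *		{ .s = "Example interface" },
+ *		{  }				- list terminator
+ *	};
+ *
+ *	status = usb_string_ids_tab(cdev, my_strings);
+ *	if (status)
+ *		return status;
+ *	my_intf_desc.iInterface = my_strings[0].id;
+ */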
+
+
+/*-------------------------------------------------------------------------*/
+
+static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	//if (req->status || req->actual != req->length)
+	//	DBG((struct usb_composite_dev *) ep->driver_data,
+	//			"setup complete --> %d, %d/%d\n",
+	//			req->status, req->actual, req->length);
+}
+
+/*
+ * The setup() callback implements all the ep0 functionality that's
+ * not handled lower down, in hardware or the hardware driver(like
+ * device and endpoint feature flags, and their status).  It's all
+ * housekeeping for the gadget function we're implementing.  Most of
+ * the work is in config and function specific setup.
+ */
+static int
+composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
+	struct usb_request		*req;
+	int				value = -EOPNOTSUPP;
+	int				status = 0;
+	u16				w_index = le16_to_cpu(ctrl->wIndex);
+	u8				intf = w_index & 0xFF;
+	u16				w_value = le16_to_cpu(ctrl->wValue);
+	u16				w_length = le16_to_cpu(ctrl->wLength);
+	struct usb_function		*f = NULL;
+	u8				endp;
+
+	if (cdev == NULL) {
+		printk("[func]:%s, [line]:%u cdev is null\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	req = cdev->req;
+	if (req == NULL) {
+		printk("[func]:%s, [line]:%u req is null\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (w_length > USB_BUFSIZ) {
+		if (ctrl->bRequestType == USB_DIR_OUT) {
+			goto done;
+		} else {
+			/* clamp the reply to the size of our ep0 buffer */
+			w_length = USB_BUFSIZ;
+		}
+	}
+
+	/* partial re-init of the response message; the function or the
+	 * gadget might need to intercept e.g. a control-OUT completion
+	 * when we delegate to it.
+	 */
+	req->zero = 0;
+	req->complete = composite_setup_complete;
+	req->length = 0;
+	gadget->ep0->driver_data = cdev;
+
+	//USBSTACK_DBG("%s,%u, 0x%x \n", __FUNCTION__, __LINE__, ctrl->bRequest);
+
+	switch (ctrl->bRequest) {
+
+	/* we handle all standard USB descriptors */
+	case USB_REQ_GET_DESCRIPTOR:
+		if (ctrl->bRequestType != USB_DIR_IN)
+			goto unknown;
+		switch (w_value >> 8) {
+
+		case USB_DT_DEVICE:
+			cdev->desc.bNumConfigurations =
+				count_configs(cdev, USB_DT_DEVICE);
+			cdev->desc.bMaxPacketSize0 =
+				cdev->gadget->ep0->maxpacket;
+			if (gadget_is_superspeed(gadget)) {
+				if (gadget->speed >= USB_SPEED_SUPER) {
+					cdev->desc.bcdUSB = cpu_to_le16(0x0300);
+					cdev->desc.bMaxPacketSize0 = 9;
+				} else {
+					cdev->desc.bcdUSB = cpu_to_le16(0x0210);
+				}
+			}
+
+			value = min(w_length, (u16) sizeof cdev->desc);
+			memcpy(req->buf, &cdev->desc, value);
+			break;
+		case USB_DT_DEVICE_QUALIFIER:
+			if (!gadget_is_dualspeed(gadget) ||
+			    gadget->speed >= USB_SPEED_SUPER)
+				break;
+			device_qual(cdev);
+			value = min_t(int, w_length,
+				sizeof(struct usb_qualifier_descriptor));
+			break;
+		case USB_DT_OTHER_SPEED_CONFIG:
+			if (!gadget_is_dualspeed(gadget) ||
+			    gadget->speed >= USB_SPEED_SUPER)
+				break;
+			/* FALLTHROUGH */
+		case USB_DT_CONFIG:
+			value = config_desc(cdev, w_value);
+			if (value >= 0)
+				value = min(w_length, (u16) value);
+			break;
+		case USB_DT_STRING:
+			value = get_string(cdev, req->buf,
+					w_index, w_value & 0xff);
+			if (value >= 0)
+				value = min(w_length, (u16) value);
+			break;
+		case USB_DT_BOS:
+			if (gadget_is_superspeed(gadget)) {
+				value = bos_desc(cdev);
+				value = min(w_length, (u16) value);
+			}
+			break;
+		}
+		break;
+
+	/* any number of configs can work */
+	case USB_REQ_SET_CONFIGURATION:
+		USBSTACK_DBG("USB_REQ_SET_CONFIGURATION");
+		printk("set config\n");
+		if (ctrl->bRequestType != 0)
+			goto unknown;
+		if (gadget_is_otg(gadget)) {
+			if (gadget->a_hnp_support)
+				DBG(cdev, "HNP available\n");
+			else if (gadget->a_alt_hnp_support)
+				DBG(cdev, "HNP on another port\n");
+			else
+				VDBG(cdev, "HNP inactive\n");
+		}
+		spin_lock(&cdev->lock);
+		value = set_config(cdev, ctrl, w_value);
+		spin_unlock(&cdev->lock);
+		break;
+	case USB_REQ_GET_CONFIGURATION:
+		if (ctrl->bRequestType != USB_DIR_IN)
+			goto unknown;
+		if (cdev->config)
+			*(u8 *)req->buf = cdev->config->bConfigurationValue;
+		else
+			*(u8 *)req->buf = 0;
+		value = min(w_length, (u16) 1);
+		break;
+
+	/* function drivers must handle get/set altsetting; if there's
+	 * no get() method, we know only altsetting zero works.
+	 */
+	case USB_REQ_SET_INTERFACE:
+		if (ctrl->bRequestType != USB_RECIP_INTERFACE)
+			goto unknown;
+		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
+			break;
+		f = cdev->config->interface[intf];
+		if (!f)
+			break;
+		if (w_value && !f->set_alt)
+			break;
+		value = f->set_alt(f, w_index, w_value);
+		if (value == USB_GADGET_DELAYED_STATUS) {
+			DBG(cdev,
+			 "%s: interface %d (%s) requested delayed status\n",
+					__func__, intf, f->name);
+			cdev->delayed_status++;
+			DBG(cdev, "delayed_status count %d\n",
+					cdev->delayed_status);
+		}
+		break;
+	case USB_REQ_GET_INTERFACE:
+		if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
+			goto unknown;
+		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
+			break;
+		f = cdev->config->interface[intf];
+		if (!f)
+			break;
+		/* lots of interfaces only need altsetting zero... */
+		value = f->get_alt ? f->get_alt(f, w_index) : 0;
+		if (value < 0)
+			break;
+		*((u8 *)req->buf) = value;
+		value = min(w_length, (u16) 1);
+		break;
+
+	/*
+	 * USB 3.0 additions:
+	 * Function driver should handle get_status request. If such cb
+	 * wasn't supplied we respond with default value = 0
+	 * Note: function driver should supply such cb only for the first
+	 * interface of the function
+	 */
+	case USB_REQ_GET_STATUS:
+		if (!gadget_is_superspeed(gadget))
+			goto unknown;
+		if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE))
+			goto unknown;
+		value = 2;	/* This is the length of the get_status reply */
+		put_unaligned_le16(0, req->buf);
+		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
+			break;
+		f = cdev->config->interface[intf];
+		if (!f)
+			break;
+		status = f->get_status ? f->get_status(f) : 0;
+		if (status < 0)
+			break;
+		put_unaligned_le16(status & 0x0000ffff, req->buf);
+		break;
+	/*
+	 * Function drivers should handle SetFeature/ClearFeature
+	 * (FUNCTION_SUSPEND) request. function_suspend cb should be supplied
+	 * only for the first interface of the function
+	 */
+	case USB_REQ_CLEAR_FEATURE:
+	case USB_REQ_SET_FEATURE:
+		if (!gadget_is_superspeed(gadget))
+			goto unknown;
+		if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_INTERFACE))
+			goto unknown;
+		switch (w_value) {
+		case USB_INTRF_FUNC_SUSPEND:
+			if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
+				break;
+			f = cdev->config->interface[intf];
+			if (!f)
+				break;
+			value = 0;
+			if (f->func_suspend)
+				value = f->func_suspend(f, w_index >> 8);
+			if (value < 0) {
+				ERROR(cdev,
+				      "func_suspend() returned error %d\n",
+				      value);
+				value = 0;
+			}
+			break;
+		}
+		break;
+	default:
+unknown:
+		VDBG(cdev,
+			"non-core control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
+		/* functions always handle their interfaces and endpoints...
+		 * punt other recipients (other, WUSB, ...) to the current
+		 * configuration code.
+		 *
+		 * REVISIT it could make sense to let the composite device
+		 * take such requests too, if that's ever needed:  to work
+		 * in config 0, etc.
+		 */
+		switch (ctrl->bRequestType & USB_RECIP_MASK) {
+		case USB_RECIP_INTERFACE:
+			if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
+				break;
+			f = cdev->config->interface[intf];
+			break;
+
+		case USB_RECIP_ENDPOINT:
+			endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
+			list_for_each_entry(f, &cdev->config->functions, list) {
+				if (test_bit(endp, f->endpoints))
+					break;
+			}
+			if (&f->list == &cdev->config->functions)
+				f = NULL;
+			break;
+		}
+
+		if (f && f->setup)
+			value = f->setup(f, ctrl);
+		else {
+			struct usb_configuration	*c;
+
+			c = cdev->config;
+			if (c && c->setup)
+				value = c->setup(c, ctrl);
+		}
+
+		goto done;
+	}
+
+	/* respond with data transfer before status phase? */
+	if (value >= 0 && value != USB_GADGET_DELAYED_STATUS) {
+		req->length = value;
+		req->zero = value < w_length;
+		value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0) {
+			DBG(cdev, "ep_queue --> %d\n", value);
+			USBSTACK_DBG("%s, %u ep_queue --> %d", __func__, __LINE__, value);
+			req->status = 0;
+			composite_setup_complete(gadget->ep0, req);
+		}
+	} else if (value == USB_GADGET_DELAYED_STATUS && w_length != 0) {
+		WARN(cdev,
+			"%s: Delayed status not supported for w_length != 0",
+			__func__);
+		USBSTACK_DBG("%s, %u Delayed status not supported for w_length != 0", __func__, __LINE__);
+	}
+
+done:
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static void composite_disconnect(struct usb_gadget *gadget)
+{
+	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
+	unsigned long			flags;
+
+	if (cdev == NULL) {
+		printk("[func]:%s, [line]:%u cdev is null\n",
+			__func__, __LINE__);
+		return;
+	}
+
+	/* REVISIT:  should we have config and device level
+	 * disconnect callbacks?
+	 */
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		reset_config(cdev);
+	if (composite->disconnect)
+		composite->disconnect(cdev);
+	spin_unlock_irqrestore(&cdev->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static ssize_t composite_show_suspended(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+	struct usb_composite_dev *cdev = get_gadget_data(gadget);
+
+	return sprintf(buf, "%d\n", cdev->suspended);
+}
+
+static DEVICE_ATTR(suspended, 0444, composite_show_suspended, NULL);
+
+static void
+composite_unbind(struct usb_gadget *gadget)
+{
+	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
+
+	if (cdev == NULL) {
+		printk("[func]:%s, [line]:%u cdev is null\n",
+			__func__, __LINE__);
+		return;
+	}
+
+	/* composite_disconnect() must already have been called
+	 * by the underlying peripheral controller driver!
+	 * so there's no i/o concurrency that could affect the
+	 * state protected by cdev->lock.
+	 */
+
+	while (!list_empty(&cdev->configs)) {
+		struct usb_configuration	*c;
+		c = list_first_entry(&cdev->configs,
+				struct usb_configuration, list);
+		list_del(&c->list);
+		unbind_config(cdev, c);
+	}
+	if (composite->unbind)
+		composite->unbind(cdev);
+
+	if (cdev->req) {
+		kfree(cdev->req->buf);
+		usb_ep_free_request(gadget->ep0, cdev->req);
+	}
+	device_remove_file(&gadget->dev, &dev_attr_suspended);
+	kfree(cdev);
+	set_gadget_data(gadget, NULL);
+	composite = NULL;
+}
+
+static u8 override_id(struct usb_composite_dev *cdev, u8 *desc)
+{
+	if (!*desc) {
+		int ret = usb_string_id(cdev);
+		if (unlikely(ret < 0))
+			WARNING(cdev, "failed to override string ID\n");
+		else
+			*desc = ret;
+	}
+
+	return *desc;
+}
+
+static int composite_bind(struct usb_gadget *gadget)
+{
+	struct usb_composite_dev	*cdev;
+	int				status = -ENOMEM;
+
+	cdev = kzalloc(sizeof *cdev, GFP_KERNEL);
+	if (!cdev)
+		return status;
+
+	spin_lock_init(&cdev->lock);
+	cdev->gadget = gadget;
+	set_gadget_data(gadget, cdev);
+	INIT_LIST_HEAD(&cdev->configs);
+
+	/* preallocate control response and buffer */
+	cdev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
+	if (!cdev->req)
+		goto fail;
+	cdev->req->buf = kmalloc(USB_BUFSIZ, GFP_KERNEL);
+	if (!cdev->req->buf)
+		goto fail;
+	cdev->req->complete = composite_setup_complete;
+	gadget->ep0->driver_data = cdev;
+
+	cdev->bufsiz = USB_BUFSIZ;
+	cdev->driver = composite;
+
+	/*
+	 * As per USB compliance update, a device that is actively drawing
+	 * more than 100mA from USB must report itself as bus-powered in
+	 * the GetStatus(DEVICE) call.
+	 */
+	if (CONFIG_USB_GADGET_VBUS_DRAW <= USB_SELF_POWER_VBUS_MAX_DRAW)
+		usb_gadget_set_selfpowered(gadget);
+
+	/* interface and string IDs start at zero via kzalloc.
+	 * we force endpoints to start unassigned; few controller
+	 * drivers will zero ep->driver_data.
+	 */
+	usb_ep_autoconfig_reset(cdev->gadget);
+
+	/* composite gadget needs to assign strings for whole device (like
+	 * serial number), register function drivers, potentially update
+	 * power state and consumption, etc
+	 */
+	status = composite_gadget_bind(cdev);
+	if (status < 0)
+		goto fail;
+
+	cdev->desc = *composite->dev;
+
+	/* standardized runtime overrides for device ID data */
+	if (idVendor)
+		cdev->desc.idVendor = cpu_to_le16(idVendor);
+	if (idProduct)
+		cdev->desc.idProduct = cpu_to_le16(idProduct);
+	if (bcdDevice)
+		cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
+
+	/* string overrides */
+	if (iManufacturer || !cdev->desc.iManufacturer) {
+		if (!iManufacturer && !composite->iManufacturer &&
+		    !*composite_manufacturer)
+			snprintf(composite_manufacturer,
+				 sizeof composite_manufacturer,
+				 "%s %s with %s",
+				 init_utsname()->sysname,
+				 init_utsname()->release,
+				 gadget->name);
+
+		cdev->manufacturer_override =
+			override_id(cdev, &cdev->desc.iManufacturer);
+	}
+
+	if (iProduct || (!cdev->desc.iProduct && composite->iProduct))
+		cdev->product_override =
+			override_id(cdev, &cdev->desc.iProduct);
+
+	if (iSerialNumber)
+		cdev->serial_override =
+			override_id(cdev, &cdev->desc.iSerialNumber);
+
+	/* has userspace failed to provide a serial number? */
+	if (composite->needs_serial && !cdev->desc.iSerialNumber)
+		WARNING(cdev, "userspace failed to provide iSerialNumber\n");
+
+	/* finish up */
+	status = device_create_file(&gadget->dev, &dev_attr_suspended);
+	if (status)
+		goto fail;
+
+	INFO(cdev, "%s ready\n", composite->name);
+	return 0;
+
+fail:
+	composite_unbind(gadget);
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+composite_suspend(struct usb_gadget *gadget)
+{
+	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
+	struct usb_function		*f;
+
+	/* REVISIT:  should we have config level
+	 * suspend/resume callbacks?
+	 */
+	DBG(cdev, "suspend\n");
+	USBSTACK_DBG("%s suspend", __func__);
+	
+	if (cdev->config) {
+		list_for_each_entry(f, &cdev->config->functions, list) {
+			if (f->suspend)
+				f->suspend(f);
+		}
+	}
+	if (composite->suspend)
+		composite->suspend(cdev);
+
+	cdev->suspended = 1;
+
+	usb_gadget_vbus_draw(gadget, 2);
+}
+
+static void
+composite_resume(struct usb_gadget *gadget)
+{
+	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
+	struct usb_function		*f;
+	u8				maxpower;
+
+	/* REVISIT:  should we have config level
+	 * suspend/resume callbacks?
+	 */
+	DBG(cdev, "resume\n");
+	USBSTACK_DBG("%s resume", __func__);
+	if (composite->resume)
+		composite->resume(cdev);
+	if (cdev->config) {
+		list_for_each_entry(f, &cdev->config->functions, list) {
+			if (f->resume)
+				f->resume(f);
+			if (!cdev->config) {
+				USBSTACK_DBG("%s resume config is NULL\n", __func__);
+				printk("%s resume config is NULL\n", __func__);
+				goto RESUME_OUT;
+			}
+		}
+
+		maxpower = cdev->config->bMaxPower;
+
+		usb_gadget_vbus_draw(gadget, maxpower ?
+			(2 * maxpower) : CONFIG_USB_GADGET_VBUS_DRAW);
+	}
+RESUME_OUT:
+	cdev->suspended = 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_gadget_driver composite_driver = {
+#ifdef CONFIG_USB_GADGET_SUPERSPEED
+	.max_speed	= USB_SPEED_SUPER,
+#else
+	.max_speed	= USB_SPEED_HIGH,
+#endif
+
+	.unbind		= composite_unbind,
+
+	.setup		= composite_setup,
+	.disconnect	= composite_disconnect,
+
+	.suspend	= composite_suspend,
+	.resume		= composite_resume,
+
+	.driver	= {
+		.owner		= THIS_MODULE,
+	},
+};
+
+/**
+ * usb_composite_probe() - register a composite driver
+ * @driver: the driver to register
+ * @bind: the callback used to allocate resources that are shared across the
+ *	whole device, such as string IDs, and add its configurations using
+ *	@usb_add_config().  This may fail by returning a negative errno
+ *	value; it should return zero on successful initialization.
+ * Context: single threaded during gadget setup
+ *
+ * This function is used to register drivers using the composite driver
+ * framework.  The return value is zero, or a negative errno value.
+ * Those values normally come from the driver's @bind method, which does
+ * all the work of setting up the driver to match the hardware.
+ *
+ * On successful return, the gadget is ready to respond to requests from
+ * the host, unless one of its components invokes usb_gadget_disconnect()
+ * while it was binding.  That would usually be done in order to wait for
+ * some userspace participation.
+ */
+int usb_composite_probe(struct usb_composite_driver *driver,
+			       int (*bind)(struct usb_composite_dev *cdev))
+{
+	if (!driver || !driver->dev || !bind || composite)
+		return -EINVAL;
+
+	if (!driver->name)
+		driver->name = "composite";
+	if (!driver->iProduct)
+		driver->iProduct = driver->name;
+	composite_driver.function =  (char *) driver->name;
+	composite_driver.driver.name = driver->name;
+	composite_driver.max_speed =
+		min_t(u8, composite_driver.max_speed, driver->max_speed);
+	composite = driver;
+	composite_gadget_bind = bind;
+
+	return usb_gadget_probe_driver(&composite_driver, composite_bind);
+}
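+
+/*
+ * Illustrative sketch only (not part of this driver): a minimal gadget
+ * module registering itself through usb_composite_probe().  The names
+ * my_composite, my_bind, my_device_desc and my_dev_strings are hypothetical.
+ *
+ *	static int my_bind(struct usb_composite_dev *cdev)
+ *	{
+ *		// allocate string IDs, then add configurations with
+ *		// usb_add_config(cdev, &my_config, my_config_bind) ...
+ *		return 0;
+ *	}
+ *
+ *	static struct usb_composite_driver my_composite = {
+ *		.name      = "my_gadget",
+ *		.dev       = &my_device_desc,	// struct usb_device_descriptor
+ *		.strings   = my_dev_strings,
+ *		.max_speed = USB_SPEED_HIGH,
+ *	};
+ *
+ *	static int __init my_init(void)
+ *	{
+ *		return usb_composite_probe(&my_composite, my_bind);
+ *	}
+ *	module_init(my_init);
+ */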
+
+/**
+ * usb_composite_unregister() - unregister a composite driver
+ * @driver: the driver to unregister
+ *
+ * This function is used to unregister drivers using the composite
+ * driver framework.
+ */
+void usb_composite_unregister(struct usb_composite_driver *driver)
+{
+	if (composite != driver)
+		return;
+	usb_gadget_unregister_driver(&composite_driver);
+}
+
+/**
+ * usb_composite_setup_continue() - Continue with the control transfer
+ * @cdev: the composite device whose control transfer was kept waiting
+ *
+ * This function must be called by the USB function driver to continue
+ * with the control transfer's data/status stage in case it had requested to
+ * delay the data/status stages. A USB function's setup handler (e.g. set_alt())
+ * can request the composite framework to delay the setup request's data/status
+ * stages by returning USB_GADGET_DELAYED_STATUS.
+ */
+void usb_composite_setup_continue(struct usb_composite_dev *cdev)
+{
+	int			value;
+	struct usb_request	*req = cdev->req;
+	unsigned long		flags;
+
+	DBG(cdev, "%s\n", __func__);
+	spin_lock_irqsave(&cdev->lock, flags);
+
+	if (cdev->delayed_status == 0) {
+		WARN(cdev, "%s: Unexpected call\n", __func__);
+
+	} else if (--cdev->delayed_status == 0) {
+		DBG(cdev, "%s: Completing delayed status\n", __func__);
+		req->length = 0;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0) {
+			DBG(cdev, "ep_queue --> %d\n", value);
+			req->status = 0;
+			composite_setup_complete(cdev->gadget->ep0, req);
+		}
+	}
+
+	spin_unlock_irqrestore(&cdev->lock, flags);
+}
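+
+/*
+ * Illustrative sketch only (hypothetical function driver, not part of this
+ * file): a function's set_alt() can defer the status stage by returning
+ * USB_GADGET_DELAYED_STATUS and release it later, e.g. from a workqueue,
+ * by calling usb_composite_setup_continue().  The my_func names below are
+ * assumptions made for the example.
+ *
+ *	static int my_func_set_alt(struct usb_function *f,
+ *				   unsigned intf, unsigned alt)
+ *	{
+ *		struct my_func *dev = func_to_my_func(f);
+ *
+ *		schedule_work(&dev->start_work);	// finish config later
+ *		return USB_GADGET_DELAYED_STATUS;	// hold the status stage
+ *	}
+ *
+ *	static void my_func_start_work(struct work_struct *w)
+ *	{
+ *		struct my_func *dev = container_of(w, struct my_func,
+ *						   start_work);
+ *
+ *		// ... bring the function up ...
+ *		usb_composite_setup_continue(dev->cdev);
+ *	}
+ */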
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/config.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/config.c
new file mode 100644
index 0000000..7542a72
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/config.c
@@ -0,0 +1,158 @@
+/*
+ * usb/gadget/config.c -- simplify building config descriptors
+ *
+ * Copyright (C) 2003 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/device.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+
+/**
+ * usb_descriptor_fillbuf - fill buffer with descriptors
+ * @buf: Buffer to be filled
+ * @buflen: Size of buf
+ * @src: Array of descriptor pointers, terminated by null pointer.
+ *
+ * Copies descriptors into the buffer, returning the length or a
+ * negative error code if they can't all be copied.  Useful when
+ * assembling descriptors for an associated set of interfaces used
+ * as part of configuring a composite device; or in other cases where
+ * sets of descriptors need to be marshaled.
+ */
+int
+usb_descriptor_fillbuf(void *buf, unsigned buflen,
+		const struct usb_descriptor_header **src)
+{
+	u8	*dest = buf;
+
+	if (!src)
+		return -EINVAL;
+
+	/* fill buffer from src[] until null descriptor ptr */
+	for (; NULL != *src; src++) {
+		unsigned		len = (*src)->bLength;
+
+		if (len > buflen)
+			return -EINVAL;
+		memcpy(dest, *src, len);
+		buflen -= len;
+		dest += len;
+	}
+	return dest - (u8 *)buf;
+}
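+
+/*
+ * Illustrative sketch only (hypothetical descriptors): a function driver
+ * keeps a null-terminated vector of descriptor pointers and marshals it
+ * into a response buffer.  my_intf_desc, my_in_desc, buf and buflen are
+ * assumptions made for the example.
+ *
+ *	static const struct usb_descriptor_header *my_function_descs[] = {
+ *		(struct usb_descriptor_header *) &my_intf_desc,
+ *		(struct usb_descriptor_header *) &my_in_desc,
+ *		NULL,			// vector must be null-terminated
+ *	};
+ *
+ *	len = usb_descriptor_fillbuf(buf, buflen, my_function_descs);
+ *	if (len < 0)
+ *		return len;		// descriptors did not fit
+ */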
+
+
+/**
+ * usb_gadget_config_buf - builds a complete configuration descriptor
+ * @config: Header for the descriptor, including characteristics such
+ *	as power requirements and number of interfaces.
+ * @desc: Null-terminated vector of pointers to the descriptors (interface,
+ *	endpoint, etc) defining all functions in this device configuration.
+ * @buf: Buffer for the resulting configuration descriptor.
+ * @length: Length of buffer.  If this is not big enough to hold the
+ *	entire configuration descriptor, an error code will be returned.
+ *
+ * This copies descriptors into the response buffer, building a descriptor
+ * for that configuration.  It returns the buffer length or a negative
+ * status code.  The config.wTotalLength field is set to match the length
+ * of the result, but other descriptor fields (including power usage and
+ * interface count) must be set by the caller.
+ *
+ * Gadget drivers could use this when constructing a config descriptor
+ * in response to USB_REQ_GET_DESCRIPTOR.  They will need to patch the
+ * resulting bDescriptorType value if USB_DT_OTHER_SPEED_CONFIG is needed.
+ */
+int usb_gadget_config_buf(
+	const struct usb_config_descriptor	*config,
+	void					*buf,
+	unsigned				length,
+	const struct usb_descriptor_header	**desc
+)
+{
+	struct usb_config_descriptor		*cp = buf;
+	int					len;
+
+	/* config descriptor first */
+	if (length < USB_DT_CONFIG_SIZE || !desc)
+		return -EINVAL;
+	*cp = *config;
+
+	/* then interface/endpoint/class/vendor/... */
+	len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf,
+			length - USB_DT_CONFIG_SIZE, desc);
+	if (len < 0)
+		return len;
+	len += USB_DT_CONFIG_SIZE;
+	if (len > 0xffff)
+		return -EINVAL;
+
+	/* patch up the config descriptor */
+	cp->bLength = USB_DT_CONFIG_SIZE;
+	cp->bDescriptorType = USB_DT_CONFIG;
+	cp->wTotalLength = cpu_to_le16(len);
+	cp->bmAttributes |= USB_CONFIG_ATT_ONE;
+	return len;
+}
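+
+/*
+ * Illustrative sketch only (hypothetical caller): answering
+ * USB_REQ_GET_DESCRIPTOR for a configuration.  my_config_desc,
+ * my_function_descs, req, buflen and w_value are assumptions made
+ * for the example.
+ *
+ *	struct usb_config_descriptor *cp = req->buf;
+ *	int len;
+ *
+ *	len = usb_gadget_config_buf(&my_config_desc, req->buf, buflen,
+ *				    my_function_descs);
+ *	if (len < 0)
+ *		return len;
+ *	if (w_value >> 8 == USB_DT_OTHER_SPEED_CONFIG)
+ *		cp->bDescriptorType = USB_DT_OTHER_SPEED_CONFIG;
+ *	// then queue "len" bytes on ep0
+ */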
+
+/**
+ * usb_copy_descriptors - copy a vector of USB descriptors
+ * @src: null-terminated vector to copy
+ * Context: initialization code, which may sleep
+ *
+ * This makes a copy of a vector of USB descriptors.  Its primary use
+ * is to support usb_function objects which can have multiple copies,
+ * each needing different descriptors.  Functions may have static
+ * tables of descriptors, which are used as templates and customized
+ * with identifiers (for interfaces, strings, endpoints, and more)
+ * as needed by a given function instance.
+ */
+struct usb_descriptor_header **
+usb_copy_descriptors(struct usb_descriptor_header **src)
+{
+	struct usb_descriptor_header **tmp;
+	unsigned bytes;
+	unsigned n_desc;
+	void *mem;
+	struct usb_descriptor_header **ret;
+
+	/* count descriptors and their sizes; then add vector size */
+	for (bytes = 0, n_desc = 0, tmp = src; *tmp; tmp++, n_desc++)
+		bytes += (*tmp)->bLength;
+	bytes += (n_desc + 1) * sizeof(*tmp);
+
+	mem = kmalloc(bytes, GFP_KERNEL);
+	if (!mem)
+		return NULL;
+
+	/* fill in pointers starting at "tmp",
+	 * to descriptors copied starting at "mem";
+	 * and return "ret"
+	 */
+	tmp = mem;
+	ret = mem;
+	mem += (n_desc + 1) * sizeof(*tmp);
+	while (*src) {
+		memcpy(mem, *src, (*src)->bLength);
+		*tmp = mem;
+		tmp++;
+		mem += (*src)->bLength;
+		src++;
+	}
+	*tmp = NULL;
+
+	return ret;
+}
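+
+/*
+ * Illustrative sketch only (hypothetical function bind): after
+ * usb_ep_autoconfig() has filled real endpoint addresses into a static
+ * template, each function instance gets its own copy.  f and
+ * my_fs_function_descs are assumptions made for the example.
+ *
+ *	f->descriptors = usb_copy_descriptors(my_fs_function_descs);
+ *	if (!f->descriptors)
+ *		return -ENOMEM;
+ *	// freed again later with usb_free_descriptors(f->descriptors)
+ */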
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/dbgp.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/dbgp.c
new file mode 100644
index 0000000..7a45344
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/dbgp.c
@@ -0,0 +1,433 @@
+/*
+ * dbgp.c -- EHCI Debug Port device gadget
+ *
+ * Copyright (C) 2010 Stephane Duverger
+ *
+ * Released under the GPLv2.
+ */
+
+/* verbose messages */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+/* See comments in "zero.c" */
+#include "epautoconf.c"
+
+#ifdef CONFIG_USB_G_DBGP_SERIAL
+#include "u_serial.c"
+#endif
+
+#define DRIVER_VENDOR_ID	0x0525 /* NetChip */
+#define DRIVER_PRODUCT_ID	0xc0de /* undefined */
+
+#define USB_DEBUG_MAX_PACKET_SIZE     8
+#define DBGP_REQ_EP0_LEN              128
+#define DBGP_REQ_LEN                  512
+
+static struct dbgp {
+	struct usb_gadget  *gadget;
+	struct usb_request *req;
+	struct usb_ep      *i_ep;
+	struct usb_ep      *o_ep;
+#ifdef CONFIG_USB_G_DBGP_SERIAL
+	struct gserial     *serial;
+#endif
+} dbgp;
+
+static struct usb_device_descriptor device_desc = {
+	.bLength = sizeof device_desc,
+	.bDescriptorType = USB_DT_DEVICE,
+	.bcdUSB = __constant_cpu_to_le16(0x0200),
+	.bDeviceClass =	USB_CLASS_VENDOR_SPEC,
+	.idVendor = __constant_cpu_to_le16(DRIVER_VENDOR_ID),
+	.idProduct = __constant_cpu_to_le16(DRIVER_PRODUCT_ID),
+	.bNumConfigurations = 1,
+};
+
+static struct usb_debug_descriptor dbg_desc = {
+	.bLength = sizeof dbg_desc,
+	.bDescriptorType = USB_DT_DEBUG,
+};
+
+static struct usb_endpoint_descriptor i_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+	.bEndpointAddress = USB_DIR_IN,
+};
+
+static struct usb_endpoint_descriptor o_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+	.bEndpointAddress = USB_DIR_OUT,
+};
+
+#ifdef CONFIG_USB_G_DBGP_PRINTK
+static int dbgp_consume(char *buf, unsigned len)
+{
+	char c;
+
+	if (!len)
+		return 0;
+
+	c = buf[len-1];
+	if (c != 0)
+		buf[len-1] = 0;
+
+	printk(KERN_NOTICE "%s%c", buf, c);
+	return 0;
+}
+
+static void __disable_ep(struct usb_ep *ep)
+{
+	if (ep && ep->driver_data == dbgp.gadget) {
+		usb_ep_disable(ep);
+		ep->driver_data = NULL;
+	}
+}
+
+static void dbgp_disable_ep(void)
+{
+	__disable_ep(dbgp.i_ep);
+	__disable_ep(dbgp.o_ep);
+}
+
+static void dbgp_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	int stp;
+	int err = 0;
+	int status = req->status;
+
+	if (ep == dbgp.i_ep) {
+		stp = 1;
+		goto fail;
+	}
+
+	if (status != 0) {
+		stp = 2;
+		goto release_req;
+	}
+
+	dbgp_consume(req->buf, req->actual);
+
+	req->length = DBGP_REQ_LEN;
+	err = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (err < 0) {
+		stp = 3;
+		goto release_req;
+	}
+
+	return;
+
+release_req:
+	kfree(req->buf);
+	usb_ep_free_request(dbgp.o_ep, req);
+	dbgp_disable_ep();
+fail:
+	dev_dbg(&dbgp.gadget->dev,
+		"complete: failure (%d:%d) ==> %d\n", stp, err, status);
+}
+
+static int dbgp_enable_ep_req(struct usb_ep *ep)
+{
+	int err, stp;
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req) {
+		err = -ENOMEM;
+		stp = 1;
+		goto fail_1;
+	}
+
+	req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL);
+	if (!req->buf) {
+		err = -ENOMEM;
+		stp = 2;
+		goto fail_2;
+	}
+
+	req->complete = dbgp_complete;
+	req->length = DBGP_REQ_LEN;
+	err = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (err < 0) {
+		stp = 3;
+		goto fail_3;
+	}
+
+	return 0;
+
+fail_3:
+	kfree(req->buf);
+fail_2:
+	usb_ep_free_request(dbgp.o_ep, req);
+fail_1:
+	dev_dbg(&dbgp.gadget->dev,
+		"enable ep req: failure (%d:%d)\n", stp, err);
+	return err;
+}
+
+static int __enable_ep(struct usb_ep *ep, struct usb_endpoint_descriptor *desc)
+{
+	int err;
+	ep->desc = desc;
+	err = usb_ep_enable(ep);
+	ep->driver_data = dbgp.gadget;
+	return err;
+}
+
+static int dbgp_enable_ep(void)
+{
+	int err, stp;
+
+	err = __enable_ep(dbgp.i_ep, &i_desc);
+	if (err < 0) {
+		stp = 1;
+		goto fail_1;
+	}
+
+	err = __enable_ep(dbgp.o_ep, &o_desc);
+	if (err < 0) {
+		stp = 2;
+		goto fail_2;
+	}
+
+	err = dbgp_enable_ep_req(dbgp.o_ep);
+	if (err < 0) {
+		stp = 3;
+		goto fail_3;
+	}
+
+	return 0;
+
+fail_3:
+	__disable_ep(dbgp.o_ep);
+fail_2:
+	__disable_ep(dbgp.i_ep);
+fail_1:
+	dev_dbg(&dbgp.gadget->dev, "enable ep: failure (%d:%d)\n", stp, err);
+	return err;
+}
+#endif
+
+static void dbgp_disconnect(struct usb_gadget *gadget)
+{
+#ifdef CONFIG_USB_G_DBGP_PRINTK
+	dbgp_disable_ep();
+#else
+	gserial_disconnect(dbgp.serial);
+#endif
+}
+
+static void dbgp_unbind(struct usb_gadget *gadget)
+{
+#ifdef CONFIG_USB_G_DBGP_SERIAL
+	kfree(dbgp.serial);
+#endif
+	if (dbgp.req) {
+		kfree(dbgp.req->buf);
+		usb_ep_free_request(gadget->ep0, dbgp.req);
+	}
+
+	gadget->ep0->driver_data = NULL;
+}
+
+static int __init dbgp_configure_endpoints(struct usb_gadget *gadget)
+{
+	int stp;
+
+	usb_ep_autoconfig_reset(gadget);
+
+	dbgp.i_ep = usb_ep_autoconfig(gadget, &i_desc);
+	if (!dbgp.i_ep) {
+		stp = 1;
+		goto fail_1;
+	}
+
+	dbgp.i_ep->driver_data = gadget;
+	i_desc.wMaxPacketSize =
+		__constant_cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);
+
+	dbgp.o_ep = usb_ep_autoconfig(gadget, &o_desc);
+	if (!dbgp.o_ep) {
+		dbgp.i_ep->driver_data = NULL;
+		stp = 2;
+		goto fail_2;
+	}
+
+	dbgp.o_ep->driver_data = gadget;
+	o_desc.wMaxPacketSize =
+		__constant_cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);
+
+	dbg_desc.bDebugInEndpoint = i_desc.bEndpointAddress;
+	dbg_desc.bDebugOutEndpoint = o_desc.bEndpointAddress;
+
+#ifdef CONFIG_USB_G_DBGP_SERIAL
+	dbgp.serial->in = dbgp.i_ep;
+	dbgp.serial->out = dbgp.o_ep;
+
+	dbgp.serial->in->desc = &i_desc;
+	dbgp.serial->out->desc = &o_desc;
+
+	if (gserial_setup(gadget, 1) < 0) {
+		stp = 3;
+		goto fail_3;
+	}
+
+	return 0;
+
+fail_3:
+	dbgp.o_ep->driver_data = NULL;
+#else
+	return 0;
+#endif
+fail_2:
+	dbgp.i_ep->driver_data = NULL;
+fail_1:
+	dev_dbg(&dbgp.gadget->dev, "ep config: failure (%d)\n", stp);
+	return -ENODEV;
+}
+
+static int __init dbgp_bind(struct usb_gadget *gadget)
+{
+	int err, stp;
+
+	dbgp.gadget = gadget;
+
+	dbgp.req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
+	if (!dbgp.req) {
+		err = -ENOMEM;
+		stp = 1;
+		goto fail;
+	}
+
+	dbgp.req->buf = kmalloc(DBGP_REQ_EP0_LEN, GFP_KERNEL);
+	if (!dbgp.req->buf) {
+		err = -ENOMEM;
+		stp = 2;
+		goto fail;
+	}
+
+	dbgp.req->length = DBGP_REQ_EP0_LEN;
+	gadget->ep0->driver_data = gadget;
+
+#ifdef CONFIG_USB_G_DBGP_SERIAL
+	dbgp.serial = kzalloc(sizeof(struct gserial), GFP_KERNEL);
+	if (!dbgp.serial) {
+		stp = 3;
+		err = -ENOMEM;
+		goto fail;
+	}
+#endif
+	err = dbgp_configure_endpoints(gadget);
+	if (err < 0) {
+		stp = 4;
+		goto fail;
+	}
+
+	dev_dbg(&dbgp.gadget->dev, "bind: success\n");
+	return 0;
+
+fail:
+	dev_dbg(&gadget->dev, "bind: failure (%d:%d)\n", stp, err);
+	dbgp_unbind(gadget);
+	return err;
+}
+
+static void dbgp_setup_complete(struct usb_ep *ep,
+				struct usb_request *req)
+{
+	dev_dbg(&dbgp.gadget->dev, "setup complete: %d, %d/%d\n",
+		req->status, req->actual, req->length);
+}
+
+static int dbgp_setup(struct usb_gadget *gadget,
+		      const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_request *req = dbgp.req;
+	u8 request = ctrl->bRequest;
+	u16 value = le16_to_cpu(ctrl->wValue);
+	u16 length = le16_to_cpu(ctrl->wLength);
+	int err = -EOPNOTSUPP;
+	void *data = NULL;
+	u16 len = 0;
+	int port_line = 0;
+	gadget->ep0->driver_data = gadget;
+
+	if (request == USB_REQ_GET_DESCRIPTOR) {
+		switch (value>>8) {
+		case USB_DT_DEVICE:
+			dev_dbg(&dbgp.gadget->dev, "setup: desc device\n");
+			len = sizeof device_desc;
+			data = &device_desc;
+			device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
+			break;
+		case USB_DT_DEBUG:
+			dev_dbg(&dbgp.gadget->dev, "setup: desc debug\n");
+			len = sizeof dbg_desc;
+			data = &dbg_desc;
+			break;
+		default:
+			goto fail;
+		}
+		err = 0;
+	} else if (request == USB_REQ_SET_FEATURE &&
+		   value == USB_DEVICE_DEBUG_MODE) {
+		dev_dbg(&dbgp.gadget->dev, "setup: feat debug\n");
+#ifdef CONFIG_USB_G_DBGP_PRINTK
+		err = dbgp_enable_ep();
+#else
+		err = gserial_connect(dbgp.serial, &port_line);
+#endif
+		if (err < 0)
+			goto fail;
+	} else
+		goto fail;
+
+	req->length = min(length, len);
+	req->zero = len < req->length;
+	if (data && req->length)
+		memcpy(req->buf, data, req->length);
+
+	req->complete = dbgp_setup_complete;
+	return usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
+
+fail:
+	dev_dbg(&dbgp.gadget->dev,
+		"setup: failure req %x v %x\n", request, value);
+	return err;
+}
+
+static struct usb_gadget_driver dbgp_driver = {
+	.function = "dbgp",
+	.max_speed = USB_SPEED_HIGH,
+	.unbind = dbgp_unbind,
+	.setup = dbgp_setup,
+	.disconnect = dbgp_disconnect,
+	.driver	= {
+		.owner = THIS_MODULE,
+		.name = "dbgp"
+	},
+};
+
+static int __init dbgp_init(void)
+{
+	return usb_gadget_probe_driver(&dbgp_driver, dbgp_bind);
+}
+
+static void __exit dbgp_exit(void)
+{
+	usb_gadget_unregister_driver(&dbgp_driver);
+#ifdef CONFIG_USB_G_DBGP_SERIAL
+	gserial_cleanup();
+#endif
+}
+
+MODULE_AUTHOR("Stephane Duverger");
+MODULE_LICENSE("GPL");
+module_init(dbgp_init);
+module_exit(dbgp_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/dummy_hcd.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/dummy_hcd.c
new file mode 100644
index 0000000..9a7b436
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/dummy_hcd.c
@@ -0,0 +1,2674 @@
+/*
+ * dummy_hcd.c -- Dummy/Loopback USB host and device emulator driver.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
+ * Copyright (C) 2003 David Brownell
+ * Copyright (C) 2003-2005 Alan Stern
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+
+/*
+ * This exposes a device side "USB gadget" API, driven by requests to a
+ * Linux-USB host controller driver.  USB traffic is simulated; there's
+ * no need for USB hardware.  Use this with two other drivers:
+ *
+ *  - Gadget driver, responding to requests (slave);
+ *  - Host-side device driver, as already familiar in Linux.
+ *
+ * Having this all in one kernel can help some stages of development,
+ * bypassing some hardware (and driver) issues.  UML could help too.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/scatterlist.h>
+
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+#define DRIVER_DESC	"USB Host+Gadget Emulator"
+#define DRIVER_VERSION	"02 May 2005"
+
+#define POWER_BUDGET	500	/* in mA; use 8 for low-power port testing */
+
+static const char	driver_name[] = "dummy_hcd";
+static const char	driver_desc[] = "USB Host+Gadget Emulator";
+
+static const char	gadget_name[] = "dummy_udc";
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("David Brownell");
+MODULE_LICENSE("GPL");
+
+struct dummy_hcd_module_parameters {
+	bool is_super_speed;
+	bool is_high_speed;
+};
+
+static struct dummy_hcd_module_parameters mod_data = {
+	.is_super_speed = false,
+	.is_high_speed = true,
+};
+module_param_named(is_super_speed, mod_data.is_super_speed, bool, S_IRUGO);
+MODULE_PARM_DESC(is_super_speed, "true to simulate SuperSpeed connection");
+module_param_named(is_high_speed, mod_data.is_high_speed, bool, S_IRUGO);
+MODULE_PARM_DESC(is_high_speed, "true to simulate HighSpeed connection");
+/*-------------------------------------------------------------------------*/
+
+/* gadget side driver data structures */
+struct dummy_ep {
+	struct list_head		queue;
+	unsigned long			last_io;	/* jiffies timestamp */
+	struct usb_gadget		*gadget;
+	const struct usb_endpoint_descriptor *desc;
+	struct usb_ep			ep;
+	unsigned			halted:1;
+	unsigned			wedged:1;
+	unsigned			already_seen:1;
+	unsigned			setup_stage:1;
+	unsigned			stream_en:1;
+};
+
+struct dummy_request {
+	struct list_head		queue;		/* ep's requests */
+	struct usb_request		req;
+};
+
+static inline struct dummy_ep *usb_ep_to_dummy_ep(struct usb_ep *_ep)
+{
+	return container_of(_ep, struct dummy_ep, ep);
+}
+
+static inline struct dummy_request *usb_request_to_dummy_request
+		(struct usb_request *_req)
+{
+	return container_of(_req, struct dummy_request, req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Every device has ep0 for control requests, plus up to 30 more endpoints,
+ * in one of two types:
+ *
+ *   - Configurable:  direction (in/out), type (bulk, iso, etc), and endpoint
+ *     number can be changed.  Names like "ep-a" are used for this type.
+ *
+ *   - Fixed Function:  in other cases.  some characteristics may be mutable;
+ *     that'd be hardware-specific.  Names like "ep12out-bulk" are used.
+ *
+ * Gadget drivers are responsible for not setting up conflicting endpoint
+ * configurations, illegal or unsupported packet lengths, and so on.
+ */
+
+static const char ep0name[] = "ep0";
+
+static const char *const ep_name[] = {
+	ep0name,				/* everyone has ep0 */
+
+	/* act like a pxa250: fifteen fixed function endpoints */
+	"ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
+	"ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
+	"ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
+		"ep15in-int",
+
+	/* or like sa1100: two fixed function endpoints */
+	"ep1out-bulk", "ep2in-bulk",
+
+	/* and now some generic EPs so we have enough in multi config */
+	"ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in",
+	"ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out",
+};
+#define DUMMY_ENDPOINTS	ARRAY_SIZE(ep_name)
+
+/*-------------------------------------------------------------------------*/
+
+#define FIFO_SIZE		64
+
+struct urbp {
+	struct urb		*urb;
+	struct list_head	urbp_list;
+	struct sg_mapping_iter	miter;
+	u32			miter_started;
+};
+
+
+enum dummy_rh_state {
+	DUMMY_RH_RESET,
+	DUMMY_RH_SUSPENDED,
+	DUMMY_RH_RUNNING
+};
+
+struct dummy_hcd {
+	struct dummy			*dum;
+	enum dummy_rh_state		rh_state;
+	struct timer_list		timer;
+	u32				port_status;
+	u32				old_status;
+	unsigned long			re_timeout;
+
+	struct usb_device		*udev;
+	struct list_head		urbp_list;
+	u32				stream_en_ep;
+	u8				num_stream[30 / 2];
+
+	unsigned			active:1;
+	unsigned			old_active:1;
+	unsigned			resuming:1;
+};
+
+struct dummy {
+	spinlock_t			lock;
+
+	/*
+	 * SLAVE/GADGET side support
+	 */
+	struct dummy_ep			ep[DUMMY_ENDPOINTS];
+	int				address;
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+	struct dummy_request		fifo_req;
+	u8				fifo_buf[FIFO_SIZE];
+	u16				devstatus;
+	unsigned			udc_suspended:1;
+	unsigned			pullup:1;
+
+	/*
+	 * MASTER/HOST side support
+	 */
+	struct dummy_hcd		*hs_hcd;
+	struct dummy_hcd		*ss_hcd;
+};
+
+static inline struct dummy_hcd *hcd_to_dummy_hcd(struct usb_hcd *hcd)
+{
+	return (struct dummy_hcd *) (hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *dummy_hcd_to_hcd(struct dummy_hcd *dum)
+{
+	return container_of((void *) dum, struct usb_hcd, hcd_priv);
+}
+
+static inline struct device *dummy_dev(struct dummy_hcd *dum)
+{
+	return dummy_hcd_to_hcd(dum)->self.controller;
+}
+
+static inline struct device *udc_dev(struct dummy *dum)
+{
+	return dum->gadget.dev.parent;
+}
+
+static inline struct dummy *ep_to_dummy(struct dummy_ep *ep)
+{
+	return container_of(ep->gadget, struct dummy, gadget);
+}
+
+static inline struct dummy_hcd *gadget_to_dummy_hcd(struct usb_gadget *gadget)
+{
+	struct dummy *dum = container_of(gadget, struct dummy, gadget);
+	if (dum->gadget.speed == USB_SPEED_SUPER)
+		return dum->ss_hcd;
+	else
+		return dum->hs_hcd;
+}
+
+static inline struct dummy *gadget_dev_to_dummy(struct device *dev)
+{
+	return container_of(dev, struct dummy, gadget.dev);
+}
+
+static struct dummy			the_controller;
+
+/*-------------------------------------------------------------------------*/
+
+/* SLAVE/GADGET SIDE UTILITY ROUTINES */
+
+/* called with spinlock held */
+static void nuke(struct dummy *dum, struct dummy_ep *ep)
+{
+	while (!list_empty(&ep->queue)) {
+		struct dummy_request	*req;
+
+		req = list_entry(ep->queue.next, struct dummy_request, queue);
+		list_del_init(&req->queue);
+		req->req.status = -ESHUTDOWN;
+
+		spin_unlock(&dum->lock);
+		req->req.complete(&ep->ep, &req->req);
+		spin_lock(&dum->lock);
+	}
+}
+
+/* caller must hold lock */
+static void stop_activity(struct dummy *dum)
+{
+	struct dummy_ep	*ep;
+
+	/* prevent any more requests */
+	dum->address = 0;
+
+	/* The timer is left running so that outstanding URBs can fail */
+
+	/* nuke any pending requests first, so driver i/o is quiesced */
+	list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list)
+		nuke(dum, ep);
+
+	/* driver now does any non-usb quiescing necessary */
+}
+
+/**
+ * set_link_state_by_speed() - Sets the current state of the link according to
+ *	the hcd speed
+ * @dum_hcd: pointer to the dummy_hcd structure to update the link state for
+ *
+ * This function updates the port_status according to the link state and the
+ * speed of the hcd.
+ */
+static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
+{
+	struct dummy *dum = dum_hcd->dum;
+
+	if (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3) {
+		if ((dum_hcd->port_status & USB_SS_PORT_STAT_POWER) == 0) {
+			dum_hcd->port_status = 0;
+		} else if (!dum->pullup || dum->udc_suspended) {
+			/* UDC suspend must cause a disconnect */
+			dum_hcd->port_status &= ~(USB_PORT_STAT_CONNECTION |
+						USB_PORT_STAT_ENABLE);
+			if ((dum_hcd->old_status &
+			     USB_PORT_STAT_CONNECTION) != 0)
+				dum_hcd->port_status |=
+					(USB_PORT_STAT_C_CONNECTION << 16);
+		} else {
+			/* device is connected and not suspended */
+			dum_hcd->port_status |= (USB_PORT_STAT_CONNECTION |
+						 USB_PORT_STAT_SPEED_5GBPS) ;
+			if ((dum_hcd->old_status &
+			     USB_PORT_STAT_CONNECTION) == 0)
+				dum_hcd->port_status |=
+					(USB_PORT_STAT_C_CONNECTION << 16);
+			if ((dum_hcd->port_status &
+			     USB_PORT_STAT_ENABLE) == 1 &&
+				(dum_hcd->port_status &
+				 USB_SS_PORT_LS_U0) == 1 &&
+				dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
+				dum_hcd->active = 1;
+		}
+	} else {
+		if ((dum_hcd->port_status & USB_PORT_STAT_POWER) == 0) {
+			dum_hcd->port_status = 0;
+		} else if (!dum->pullup || dum->udc_suspended) {
+			/* UDC suspend must cause a disconnect */
+			dum_hcd->port_status &= ~(USB_PORT_STAT_CONNECTION |
+						USB_PORT_STAT_ENABLE |
+						USB_PORT_STAT_LOW_SPEED |
+						USB_PORT_STAT_HIGH_SPEED |
+						USB_PORT_STAT_SUSPEND);
+			if ((dum_hcd->old_status &
+			     USB_PORT_STAT_CONNECTION) != 0)
+				dum_hcd->port_status |=
+					(USB_PORT_STAT_C_CONNECTION << 16);
+		} else {
+			dum_hcd->port_status |= USB_PORT_STAT_CONNECTION;
+			if ((dum_hcd->old_status &
+			     USB_PORT_STAT_CONNECTION) == 0)
+				dum_hcd->port_status |=
+					(USB_PORT_STAT_C_CONNECTION << 16);
+			if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0)
+				dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
+			else if ((dum_hcd->port_status &
+				  USB_PORT_STAT_SUSPEND) == 0 &&
+					dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
+				dum_hcd->active = 1;
+		}
+	}
+}
+
+/* caller must hold lock */
+static void set_link_state(struct dummy_hcd *dum_hcd)
+{
+	struct dummy *dum = dum_hcd->dum;
+
+	dum_hcd->active = 0;
+	if (dum->pullup)
+		if ((dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 &&
+		     dum->gadget.speed != USB_SPEED_SUPER) ||
+		    (dummy_hcd_to_hcd(dum_hcd)->speed != HCD_USB3 &&
+		     dum->gadget.speed == USB_SPEED_SUPER))
+			return;
+
+	set_link_state_by_speed(dum_hcd);
+
+	if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
+	     dum_hcd->active)
+		dum_hcd->resuming = 0;
+
+	/* if !connected or reset */
+	if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
+			(dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
+		/*
+		 * We were connected and not in reset before (the disconnect
+		 * or reset just happened), and a driver is attached, so
+		 * report the disconnect.
+		 */
+		if ((dum_hcd->old_status & USB_PORT_STAT_CONNECTION) != 0 &&
+		    (dum_hcd->old_status & USB_PORT_STAT_RESET) == 0 &&
+		    dum->driver) {
+			stop_activity(dum);
+			spin_unlock(&dum->lock);
+			dum->driver->disconnect(&dum->gadget);
+			spin_lock(&dum->lock);
+		}
+	} else if (dum_hcd->active != dum_hcd->old_active) {
+		if (dum_hcd->old_active && dum->driver->suspend) {
+			spin_unlock(&dum->lock);
+			dum->driver->suspend(&dum->gadget);
+			spin_lock(&dum->lock);
+		} else if (!dum_hcd->old_active &&  dum->driver->resume) {
+			spin_unlock(&dum->lock);
+			dum->driver->resume(&dum->gadget);
+			spin_lock(&dum->lock);
+		}
+	}
+
+	dum_hcd->old_status = dum_hcd->port_status;
+	dum_hcd->old_active = dum_hcd->active;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* SLAVE/GADGET SIDE DRIVER
+ *
+ * This only tracks gadget state.  All the work is done when the host
+ * side tries some (emulated) i/o operation.  Real device controller
+ * drivers would do real i/o using dma, fifos, irqs, timers, etc.
+ */
+
+#define is_enabled(dum) \
+	(dum->port_status & USB_PORT_STAT_ENABLE)
+
+static int dummy_enable(struct usb_ep *_ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct dummy		*dum;
+	struct dummy_hcd	*dum_hcd;
+	struct dummy_ep		*ep;
+	unsigned		max;
+	int			retval;
+
+	ep = usb_ep_to_dummy_ep(_ep);
+	if (!_ep || !desc || ep->desc || _ep->name == ep0name
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+	dum = ep_to_dummy(ep);
+	if (!dum->driver)
+		return -ESHUTDOWN;
+
+	dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
+	if (!is_enabled(dum_hcd))
+		return -ESHUTDOWN;
+
+	/*
+	 * For HS/FS devices only bits 0..10 of the wMaxPacketSize represent the
+	 * maximum packet size.
+	 * For SS devices the wMaxPacketSize is limited by 1024.
+	 */
+	max = usb_endpoint_maxp(desc) & 0x7ff;
+
+	/* drivers must not request bad settings, since lower levels
+	 * (hardware or its drivers) may not check.  some endpoints
+	 * can't do iso, many have maxpacket limitations, etc.
+	 *
+	 * since this "hardware" driver is here to help debugging, we
+	 * have some extra sanity checks.  (there could be more though,
+	 * especially for "ep9out" style fixed function ones.)
+	 */
+	retval = -EINVAL;
+	switch (usb_endpoint_type(desc)) {
+	case USB_ENDPOINT_XFER_BULK:
+		if (strstr(ep->ep.name, "-iso")
+				|| strstr(ep->ep.name, "-int")) {
+			goto done;
+		}
+		switch (dum->gadget.speed) {
+		case USB_SPEED_SUPER:
+			if (max == 1024)
+				break;
+			goto done;
+		case USB_SPEED_HIGH:
+			if (max == 512)
+				break;
+			goto done;
+		case USB_SPEED_FULL:
+			if (max == 8 || max == 16 || max == 32 || max == 64)
+				/* we'll fake any legal size */
+				break;
+			/* save a return statement */
+		default:
+			goto done;
+		}
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
+			goto done;
+		/* real hardware might not handle all packet sizes */
+		switch (dum->gadget.speed) {
+		case USB_SPEED_SUPER:
+		case USB_SPEED_HIGH:
+			if (max <= 1024)
+				break;
+			/* save a return statement */
+		case USB_SPEED_FULL:
+			if (max <= 64)
+				break;
+			/* save a return statement */
+		default:
+			if (max <= 8)
+				break;
+			goto done;
+		}
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (strstr(ep->ep.name, "-bulk")
+				|| strstr(ep->ep.name, "-int"))
+			goto done;
+		/* real hardware might not handle all packet sizes */
+		switch (dum->gadget.speed) {
+		case USB_SPEED_SUPER:
+		case USB_SPEED_HIGH:
+			if (max <= 1024)
+				break;
+			/* save a return statement */
+		case USB_SPEED_FULL:
+			if (max <= 1023)
+				break;
+			/* save a return statement */
+		default:
+			goto done;
+		}
+		break;
+	default:
+		/* few chips support control except on ep0 */
+		goto done;
+	}
+
+	_ep->maxpacket = max;
+	if (usb_ss_max_streams(_ep->comp_desc)) {
+		if (!usb_endpoint_xfer_bulk(desc)) {
+			dev_err(udc_dev(dum), "Can't enable stream support on "
+					"non-bulk ep %s\n", _ep->name);
+			return -EINVAL;
+		}
+		ep->stream_en = 1;
+	}
+	ep->desc = desc;
+
+	dev_dbg(udc_dev(dum), "enabled %s (ep%d%s-%s) maxpacket %d stream %s\n",
+		_ep->name,
+		desc->bEndpointAddress & 0x0f,
+		(desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
+		({ char *val;
+		 switch (usb_endpoint_type(desc)) {
+		 case USB_ENDPOINT_XFER_BULK:
+			 val = "bulk";
+			 break;
+		 case USB_ENDPOINT_XFER_ISOC:
+			 val = "iso";
+			 break;
+		 case USB_ENDPOINT_XFER_INT:
+			 val = "intr";
+			 break;
+		 default:
+			 val = "ctrl";
+			 break;
+		 }; val; }),
+		max, ep->stream_en ? "enabled" : "disabled");
+
+	/* at this point real hardware should be NAKing transfers
+	 * to that endpoint, until a buffer is queued to it.
+	 */
+	ep->halted = ep->wedged = 0;
+	retval = 0;
+done:
+	return retval;
+}
+
+static int dummy_disable(struct usb_ep *_ep)
+{
+	struct dummy_ep		*ep;
+	struct dummy		*dum;
+	unsigned long		flags;
+	int			retval;
+
+	ep = usb_ep_to_dummy_ep(_ep);
+	if (!_ep || !ep->desc || _ep->name == ep0name)
+		return -EINVAL;
+	dum = ep_to_dummy(ep);
+
+	spin_lock_irqsave(&dum->lock, flags);
+	ep->desc = NULL;
+	ep->stream_en = 0;
+	retval = 0;
+	nuke(dum, ep);
+	spin_unlock_irqrestore(&dum->lock, flags);
+
+	dev_dbg(udc_dev(dum), "disabled %s\n", _ep->name);
+	return retval;
+}
+
+static struct usb_request *dummy_alloc_request(struct usb_ep *_ep,
+		gfp_t mem_flags)
+{
+	struct dummy_ep		*ep;
+	struct dummy_request	*req;
+
+	if (!_ep)
+		return NULL;
+	ep = usb_ep_to_dummy_ep(_ep);
+
+	req = kzalloc(sizeof(*req), mem_flags);
+	if (!req)
+		return NULL;
+	INIT_LIST_HEAD(&req->queue);
+	return &req->req;
+}
+
+static void dummy_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct dummy_ep		*ep;
+	struct dummy_request	*req;
+
+	if (!_ep || !_req)
+		return;
+	ep = usb_ep_to_dummy_ep(_ep);
+	if (!ep->desc && _ep->name != ep0name)
+		return;
+
+	req = usb_request_to_dummy_request(_req);
+	WARN_ON(!list_empty(&req->queue));
+	kfree(req);
+}
+
+static void fifo_complete(struct usb_ep *ep, struct usb_request *req)
+{
+}
+
+static int dummy_queue(struct usb_ep *_ep, struct usb_request *_req,
+		gfp_t mem_flags)
+{
+	struct dummy_ep		*ep;
+	struct dummy_request	*req;
+	struct dummy		*dum;
+	struct dummy_hcd	*dum_hcd;
+	unsigned long		flags;
+
+	req = usb_request_to_dummy_request(_req);
+	if (!_req || !list_empty(&req->queue) || !_req->complete)
+		return -EINVAL;
+
+	ep = usb_ep_to_dummy_ep(_ep);
+	if (!_ep || (!ep->desc && _ep->name != ep0name))
+		return -EINVAL;
+
+	dum = ep_to_dummy(ep);
+	dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
+	if (!dum->driver || !is_enabled(dum_hcd))
+		return -ESHUTDOWN;
+
+#if 0
+	dev_dbg(udc_dev(dum), "ep %p queue req %p to %s, len %d buf %p\n",
+			ep, _req, _ep->name, _req->length, _req->buf);
+#endif
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+	spin_lock_irqsave(&dum->lock, flags);
+
+	/* implement an emulated single-request FIFO */
+	if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
+			list_empty(&dum->fifo_req.queue) &&
+			list_empty(&ep->queue) &&
+			_req->length <= FIFO_SIZE) {
+		req = &dum->fifo_req;
+		req->req = *_req;
+		req->req.buf = dum->fifo_buf;
+		memcpy(dum->fifo_buf, _req->buf, _req->length);
+		req->req.context = dum;
+		req->req.complete = fifo_complete;
+
+		list_add_tail(&req->queue, &ep->queue);
+		spin_unlock(&dum->lock);
+		_req->actual = _req->length;
+		_req->status = 0;
+		_req->complete(_ep, _req);
+		spin_lock(&dum->lock);
+	}  else
+		list_add_tail(&req->queue, &ep->queue);
+	spin_unlock_irqrestore(&dum->lock, flags);
+
+	/* real hardware would likely enable transfers here, in case
+	 * it'd been left NAKing.
+	 */
+	return 0;
+}
+
+static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct dummy_ep		*ep;
+	struct dummy		*dum;
+	int			retval = -EINVAL;
+	unsigned long		flags;
+	struct dummy_request	*req = NULL;
+
+	if (!_ep || !_req)
+		return retval;
+	ep = usb_ep_to_dummy_ep(_ep);
+	dum = ep_to_dummy(ep);
+
+	if (!dum->driver)
+		return -ESHUTDOWN;
+
+	local_irq_save(flags);
+	spin_lock(&dum->lock);
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req) {
+			list_del_init(&req->queue);
+			_req->status = -ECONNRESET;
+			retval = 0;
+			break;
+		}
+	}
+	spin_unlock(&dum->lock);
+
+	if (retval == 0) {
+		dev_dbg(udc_dev(dum),
+				"dequeued req %p from %s, len %d buf %p\n",
+				req, _ep->name, _req->length, _req->buf);
+		_req->complete(_ep, _req);
+	}
+	local_irq_restore(flags);
+	return retval;
+}
+
+static int
+dummy_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
+{
+	struct dummy_ep		*ep;
+	struct dummy		*dum;
+
+	if (!_ep)
+		return -EINVAL;
+	ep = usb_ep_to_dummy_ep(_ep);
+	dum = ep_to_dummy(ep);
+	if (!dum->driver)
+		return -ESHUTDOWN;
+	if (!value)
+		ep->halted = ep->wedged = 0;
+	else if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
+			!list_empty(&ep->queue))
+		return -EAGAIN;
+	else {
+		ep->halted = 1;
+		if (wedged)
+			ep->wedged = 1;
+	}
+	/* FIXME clear emulated data toggle too */
+	return 0;
+}
+
+static int
+dummy_set_halt(struct usb_ep *_ep, int value)
+{
+	return dummy_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int dummy_set_wedge(struct usb_ep *_ep)
+{
+	if (!_ep || _ep->name == ep0name)
+		return -EINVAL;
+	return dummy_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static const struct usb_ep_ops dummy_ep_ops = {
+	.enable		= dummy_enable,
+	.disable	= dummy_disable,
+
+	.alloc_request	= dummy_alloc_request,
+	.free_request	= dummy_free_request,
+
+	.queue		= dummy_queue,
+	.dequeue	= dummy_dequeue,
+
+	.set_halt	= dummy_set_halt,
+	.set_wedge	= dummy_set_wedge,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* there are both host and device side versions of this call ... */
+static int dummy_g_get_frame(struct usb_gadget *_gadget)
+{
+	struct timeval	tv;
+
+	do_gettimeofday(&tv);
+	return tv.tv_usec / 1000;
+}
+
+static int dummy_wakeup(struct usb_gadget *_gadget)
+{
+	struct dummy_hcd *dum_hcd;
+
+	dum_hcd = gadget_to_dummy_hcd(_gadget);
+	if (!(dum_hcd->dum->devstatus & ((1 << USB_DEVICE_B_HNP_ENABLE)
+				| (1 << USB_DEVICE_REMOTE_WAKEUP))))
+		return -EINVAL;
+	if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0)
+		return -ENOLINK;
+	if ((dum_hcd->port_status & USB_PORT_STAT_SUSPEND) == 0 &&
+			 dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
+		return -EIO;
+
+	/* FIXME: What if the root hub is suspended but the port isn't? */
+
+	/* hub notices our request, issues downstream resume, etc */
+	dum_hcd->resuming = 1;
+	dum_hcd->re_timeout = jiffies + msecs_to_jiffies(20);
+	mod_timer(&dummy_hcd_to_hcd(dum_hcd)->rh_timer, dum_hcd->re_timeout);
+	return 0;
+}
+
+static int dummy_set_selfpowered(struct usb_gadget *_gadget, int value)
+{
+	struct dummy	*dum;
+
+	dum = gadget_to_dummy_hcd(_gadget)->dum;
+	if (value)
+		dum->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
+	else
+		dum->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+	return 0;
+}
+
+static void dummy_udc_update_ep0(struct dummy *dum)
+{
+	if (dum->gadget.speed == USB_SPEED_SUPER)
+		dum->ep[0].ep.maxpacket = 9;
+	else
+		dum->ep[0].ep.maxpacket = 64;
+}
+
+static int dummy_pullup(struct usb_gadget *_gadget, int value)
+{
+	struct dummy_hcd *dum_hcd;
+	struct dummy	*dum;
+	unsigned long	flags;
+
+	dum = gadget_dev_to_dummy(&_gadget->dev);
+
+	if (value && dum->driver) {
+		if (mod_data.is_super_speed)
+			dum->gadget.speed = dum->driver->max_speed;
+		else if (mod_data.is_high_speed)
+			dum->gadget.speed = min_t(u8, USB_SPEED_HIGH,
+					dum->driver->max_speed);
+		else
+			dum->gadget.speed = USB_SPEED_FULL;
+		dummy_udc_update_ep0(dum);
+
+		if (dum->gadget.speed < dum->driver->max_speed)
+			dev_dbg(udc_dev(dum), "This device can perform faster"
+				" if you connect it to a %s port...\n",
+				usb_speed_string(dum->driver->max_speed));
+	}
+	dum_hcd = gadget_to_dummy_hcd(_gadget);
+
+	spin_lock_irqsave(&dum->lock, flags);
+	dum->pullup = (value != 0);
+	set_link_state(dum_hcd);
+	spin_unlock_irqrestore(&dum->lock, flags);
+
+	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
+	return 0;
+}
+
+static int dummy_udc_start(struct usb_gadget *g,
+		struct usb_gadget_driver *driver);
+static int dummy_udc_stop(struct usb_gadget *g,
+		struct usb_gadget_driver *driver);
+
+static const struct usb_gadget_ops dummy_ops = {
+	.get_frame	= dummy_g_get_frame,
+	.wakeup		= dummy_wakeup,
+	.set_selfpowered = dummy_set_selfpowered,
+	.pullup		= dummy_pullup,
+	.udc_start	= dummy_udc_start,
+	.udc_stop	= dummy_udc_stop,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* "function" sysfs attribute */
+static ssize_t show_function(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct dummy	*dum = gadget_dev_to_dummy(dev);
+
+	if (!dum->driver || !dum->driver->function)
+		return 0;
+	return scnprintf(buf, PAGE_SIZE, "%s\n", dum->driver->function);
+}
+static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Driver registration/unregistration.
+ *
+ * This is basically hardware-specific; there's usually only one real USB
+ * device (not host) controller since that's how USB devices are intended
+ * to work.  So most implementations of these api calls will rely on the
+ * fact that only one driver will ever bind to the hardware.  But curious
+ * hardware can be built with discrete components, so the gadget API doesn't
+ * require that assumption.
+ *
+ * For this emulator, it might be convenient to create a usb slave device
+ * for each driver that registers:  just add to a big root hub.
+ */
+
+static int dummy_udc_start(struct usb_gadget *g,
+		struct usb_gadget_driver *driver)
+{
+	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(g);
+	struct dummy		*dum = dum_hcd->dum;
+
+	if (driver->max_speed == USB_SPEED_UNKNOWN)
+		return -EINVAL;
+
+	/*
+	 * SLAVE side init ... the layer above hardware, which
+	 * can't enumerate without help from the driver we're binding.
+	 */
+
+	dum->devstatus = 0;
+
+	dum->driver = driver;
+	dev_dbg(udc_dev(dum), "binding gadget driver '%s'\n",
+			driver->driver.name);
+	return 0;
+}
+
+static int dummy_udc_stop(struct usb_gadget *g,
+		struct usb_gadget_driver *driver)
+{
+	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(g);
+	struct dummy		*dum = dum_hcd->dum;
+
+	dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
+			driver->driver.name);
+
+	dum->driver = NULL;
+
+	return 0;
+}
+
+#undef is_enabled
+
+/* The gadget structure is stored inside the hcd structure and will be
+ * released along with it. */
+static void dummy_gadget_release(struct device *dev)
+{
+	return;
+}
+
+static void init_dummy_udc_hw(struct dummy *dum)
+{
+	int i;
+
+	INIT_LIST_HEAD(&dum->gadget.ep_list);
+	for (i = 0; i < DUMMY_ENDPOINTS; i++) {
+		struct dummy_ep	*ep = &dum->ep[i];
+
+		if (!ep_name[i])
+			break;
+		ep->ep.name = ep_name[i];
+		ep->ep.ops = &dummy_ep_ops;
+		list_add_tail(&ep->ep.ep_list, &dum->gadget.ep_list);
+		ep->halted = ep->wedged = ep->already_seen =
+				ep->setup_stage = 0;
+		ep->ep.maxpacket = ~0;
+		ep->ep.max_streams = 16;
+		ep->last_io = jiffies;
+		ep->gadget = &dum->gadget;
+		ep->desc = NULL;
+		INIT_LIST_HEAD(&ep->queue);
+	}
+
+	dum->gadget.ep0 = &dum->ep[0].ep;
+	list_del_init(&dum->ep[0].ep.ep_list);
+	INIT_LIST_HEAD(&dum->fifo_req.queue);
+
+#ifdef CONFIG_USB_OTG
+	dum->gadget.is_otg = 1;
+#endif
+}
+
+static int dummy_udc_probe(struct platform_device *pdev)
+{
+	struct dummy	*dum = &the_controller;
+	int		rc;
+
+	dum->gadget.name = gadget_name;
+	dum->gadget.ops = &dummy_ops;
+	dum->gadget.max_speed = USB_SPEED_SUPER;
+
+	dev_set_name(&dum->gadget.dev, "gadget");
+	dum->gadget.dev.parent = &pdev->dev;
+	dum->gadget.dev.release = dummy_gadget_release;
+	rc = device_register(&dum->gadget.dev);
+	if (rc < 0) {
+		put_device(&dum->gadget.dev);
+		return rc;
+	}
+
+	init_dummy_udc_hw(dum);
+
+	rc = usb_add_gadget_udc(&pdev->dev, &dum->gadget);
+	if (rc < 0)
+		goto err_udc;
+
+	rc = device_create_file(&dum->gadget.dev, &dev_attr_function);
+	if (rc < 0)
+		goto err_dev;
+	platform_set_drvdata(pdev, dum);
+	return rc;
+
+err_dev:
+	usb_del_gadget_udc(&dum->gadget);
+err_udc:
+	device_unregister(&dum->gadget.dev);
+	return rc;
+}
+
+static int dummy_udc_remove(struct platform_device *pdev)
+{
+	struct dummy	*dum = platform_get_drvdata(pdev);
+
+	usb_del_gadget_udc(&dum->gadget);
+	platform_set_drvdata(pdev, NULL);
+	device_remove_file(&dum->gadget.dev, &dev_attr_function);
+	device_unregister(&dum->gadget.dev);
+	return 0;
+}
+
+static void dummy_udc_pm(struct dummy *dum, struct dummy_hcd *dum_hcd,
+		int suspend)
+{
+	spin_lock_irq(&dum->lock);
+	dum->udc_suspended = suspend;
+	set_link_state(dum_hcd);
+	spin_unlock_irq(&dum->lock);
+}
+
+static int dummy_udc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct dummy		*dum = platform_get_drvdata(pdev);
+	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+	dummy_udc_pm(dum, dum_hcd, 1);
+	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
+	return 0;
+}
+
+static int dummy_udc_resume(struct platform_device *pdev)
+{
+	struct dummy		*dum = platform_get_drvdata(pdev);
+	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+	dummy_udc_pm(dum, dum_hcd, 0);
+	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
+	return 0;
+}
+
+static struct platform_driver dummy_udc_driver = {
+	.probe		= dummy_udc_probe,
+	.remove		= dummy_udc_remove,
+	.suspend	= dummy_udc_suspend,
+	.resume		= dummy_udc_resume,
+	.driver		= {
+		.name	= (char *) gadget_name,
+		.owner	= THIS_MODULE,
+	},
+};
+
+/*-------------------------------------------------------------------------*/
+
+static unsigned int dummy_get_ep_idx(const struct usb_endpoint_descriptor *desc)
+{
+	unsigned int index;
+
+	index = usb_endpoint_num(desc) << 1;
+	if (usb_endpoint_dir_in(desc))
+		index |= 1;
+	return index;
+}
+
+/* MASTER/HOST SIDE DRIVER
+ *
+ * this uses the hcd framework to hook up to host side drivers.
+ * its root hub will only have one device, otherwise it acts like
+ * a normal host controller.
+ *
+ * when urbs are queued, they're just stuck on a list that we
+ * scan in a timer callback.  that callback connects writes from
+ * the host with reads from the device, and so on, based on the
+ * usb 2.0 rules.
+ */
+
+static int dummy_ep_stream_en(struct dummy_hcd *dum_hcd, struct urb *urb)
+{
+	const struct usb_endpoint_descriptor *desc = &urb->ep->desc;
+	u32 index;
+
+	if (!usb_endpoint_xfer_bulk(desc))
+		return 0;
+
+	index = dummy_get_ep_idx(desc);
+	return (1 << index) & dum_hcd->stream_en_ep;
+}
+
+/*
+ * The max stream count is saved as a nibble, so the 30 possible endpoints
+ * need only 15 bytes of memory.  Therefore we are limited to at most 16
+ * streams (0 means only one stream is used).  The maximum according to the
+ * spec is 16 bit, so if the 16 stream limit ever needs to be raised, the
+ * array size should be increased to 30 elements of type u16.
+ */
+static int get_max_streams_for_pipe(struct dummy_hcd *dum_hcd,
+		unsigned int pipe)
+{
+	int max_streams;
+
+	max_streams = dum_hcd->num_stream[usb_pipeendpoint(pipe)];
+	if (usb_pipeout(pipe))
+		max_streams >>= 4;
+	else
+		max_streams &= 0xf;
+	max_streams++;
+	return max_streams;
+}
+
+static void set_max_streams_for_pipe(struct dummy_hcd *dum_hcd,
+		unsigned int pipe, unsigned int streams)
+{
+	int max_streams;
+
+	streams--;
+	max_streams = dum_hcd->num_stream[usb_pipeendpoint(pipe)];
+	if (usb_pipeout(pipe)) {
+		streams <<= 4;
+		max_streams &= 0xf;
+	} else {
+		max_streams &= 0xf0;
+	}
+	max_streams |= streams;
+	dum_hcd->num_stream[usb_pipeendpoint(pipe)] = max_streams;
+}
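+
+/*
+ * Worked example (illustration only): storing 8 streams for endpoint 3 OUT
+ * puts streams - 1 = 7 into the high nibble, so num_stream[3] reads 0x70;
+ * get_max_streams_for_pipe() on an OUT pipe shifts it back down and returns
+ * 7 + 1 = 8.  The IN direction of the same endpoint uses the low nibble of
+ * the same byte.
+ */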
+
+static int dummy_validate_stream(struct dummy_hcd *dum_hcd, struct urb *urb)
+{
+	unsigned int max_streams;
+	int enabled;
+
+	enabled = dummy_ep_stream_en(dum_hcd, urb);
+	if (!urb->stream_id) {
+		if (enabled)
+			return -EINVAL;
+		return 0;
+	}
+	if (!enabled)
+		return -EINVAL;
+
+	max_streams = get_max_streams_for_pipe(dum_hcd,
+			usb_pipeendpoint(urb->pipe));
+	if (urb->stream_id > max_streams) {
+		dev_err(dummy_dev(dum_hcd), "Stream id %d is out of range.\n",
+				urb->stream_id);
+		BUG();
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int dummy_urb_enqueue(
+	struct usb_hcd			*hcd,
+	struct urb			*urb,
+	gfp_t				mem_flags
+) {
+	struct dummy_hcd *dum_hcd;
+	struct urbp	*urbp;
+	unsigned long	flags;
+	int		rc;
+
+	urbp = kmalloc(sizeof *urbp, mem_flags);
+	if (!urbp)
+		return -ENOMEM;
+	urbp->urb = urb;
+	urbp->miter_started = 0;
+
+	dum_hcd = hcd_to_dummy_hcd(hcd);
+	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+
+	rc = dummy_validate_stream(dum_hcd, urb);
+	if (rc) {
+		kfree(urbp);
+		goto done;
+	}
+
+	rc = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (rc) {
+		kfree(urbp);
+		goto done;
+	}
+
+	if (!dum_hcd->udev) {
+		dum_hcd->udev = urb->dev;
+		usb_get_dev(dum_hcd->udev);
+	} else if (unlikely(dum_hcd->udev != urb->dev))
+		dev_err(dummy_dev(dum_hcd), "usb_device address has changed!\n");
+
+	list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
+	urb->hcpriv = urbp;
+	if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
+		urb->error_count = 1;		/* mark as a new urb */
+
+	/* kick the scheduler, it'll do the rest */
+	if (!timer_pending(&dum_hcd->timer))
+		mod_timer(&dum_hcd->timer, jiffies + 1);
+
+ done:
+	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+	return rc;
+}
+
+static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct dummy_hcd *dum_hcd;
+	unsigned long	flags;
+	int		rc;
+
+	/* giveback happens automatically in timer callback,
+	 * so make sure the callback happens */
+	dum_hcd = hcd_to_dummy_hcd(hcd);
+	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+
+	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (!rc && dum_hcd->rh_state != DUMMY_RH_RUNNING &&
+			!list_empty(&dum_hcd->urbp_list))
+		mod_timer(&dum_hcd->timer, jiffies);
+
+	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+	return rc;
+}
+
+static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
+		u32 len)
+{
+	void *ubuf, *rbuf;
+	struct urbp *urbp = urb->hcpriv;
+	int to_host;
+	struct sg_mapping_iter *miter = &urbp->miter;
+	u32 trans = 0;
+	u32 this_sg;
+	bool next_sg;
+
+	to_host = usb_pipein(urb->pipe);
+	rbuf = req->req.buf + req->req.actual;
+
+	if (!urb->num_sgs) {
+		ubuf = urb->transfer_buffer + urb->actual_length;
+		if (to_host)
+			memcpy(ubuf, rbuf, len);
+		else
+			memcpy(rbuf, ubuf, len);
+		return len;
+	}
+
+	if (!urbp->miter_started) {
+		u32 flags = SG_MITER_ATOMIC;
+
+		if (to_host)
+			flags |= SG_MITER_TO_SG;
+		else
+			flags |= SG_MITER_FROM_SG;
+
+		sg_miter_start(miter, urb->sg, urb->num_sgs, flags);
+		urbp->miter_started = 1;
+	}
+	next_sg = sg_miter_next(miter);
+	if (next_sg == false) {
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+	do {
+		ubuf = miter->addr;
+		this_sg = min_t(u32, len, miter->length);
+		miter->consumed = this_sg;
+		trans += this_sg;
+
+		if (to_host)
+			memcpy(ubuf, rbuf, this_sg);
+		else
+			memcpy(rbuf, ubuf, this_sg);
+		len -= this_sg;
+
+		if (!len)
+			break;
+		next_sg = sg_miter_next(miter);
+		if (next_sg == false) {
+			WARN_ON_ONCE(1);
+			return -EINVAL;
+		}
+
+		rbuf += this_sg;
+	} while (1);
+
+	sg_miter_stop(miter);
+	return trans;
+}
+
+/* transfer up to a frame's worth; caller must own lock */
+static int transfer(struct dummy_hcd *dum_hcd, struct urb *urb,
+		struct dummy_ep *ep, int limit, int *status)
+{
+	struct dummy		*dum = dum_hcd->dum;
+	struct dummy_request	*req;
+
+top:
+	/* if there's no request queued, the device is NAKing; return */
+	list_for_each_entry(req, &ep->queue, queue) {
+		unsigned	host_len, dev_len, len;
+		int		is_short, to_host;
+		int		rescan = 0;
+
+		if (dummy_ep_stream_en(dum_hcd, urb)) {
+			if (urb->stream_id != req->req.stream_id)
+				continue;
+		}
+
+		/* 1..N packets of ep->ep.maxpacket each ... the last one
+		 * may be short (including zero length).
+		 *
+		 * writer can send a zlp explicitly (length 0) or implicitly
+		 * (length mod maxpacket zero, and 'zero' flag); they always
+		 * terminate reads.
+		 */
+		host_len = urb->transfer_buffer_length - urb->actual_length;
+		dev_len = req->req.length - req->req.actual;
+		len = min(host_len, dev_len);
+
+		/* FIXME update emulated data toggle too */
+
+		to_host = usb_pipein(urb->pipe);
+		if (unlikely(len == 0))
+			is_short = 1;
+		else {
+			/* not enough bandwidth left? */
+			if (limit < ep->ep.maxpacket && limit < len)
+				break;
+			len = min_t(unsigned, len, limit);
+			if (len == 0)
+				break;
+
+			/* use an extra pass for the final short packet */
+			if (len > ep->ep.maxpacket) {
+				rescan = 1;
+				len -= (len % ep->ep.maxpacket);
+			}
+			is_short = (len % ep->ep.maxpacket) != 0;
+
+			len = dummy_perform_transfer(urb, req, len);
+
+			ep->last_io = jiffies;
+			if ((int)len < 0) {
+				req->req.status = len;
+			} else {
+				limit -= len;
+				urb->actual_length += len;
+				req->req.actual += len;
+			}
+		}
+
+		/* short packets terminate, maybe with overflow/underflow.
+		 * it's only really an error to write too much.
+		 *
+		 * partially filling a buffer optionally blocks queue advances
+		 * (so completion handlers can clean up the queue) but we don't
+		 * need to emulate such data-in-flight.
+		 */
+		if (is_short) {
+			if (host_len == dev_len) {
+				req->req.status = 0;
+				*status = 0;
+			} else if (to_host) {
+				req->req.status = 0;
+				if (dev_len > host_len)
+					*status = -EOVERFLOW;
+				else
+					*status = 0;
+			} else if (!to_host) {
+				*status = 0;
+				if (host_len > dev_len)
+					req->req.status = -EOVERFLOW;
+				else
+					req->req.status = 0;
+			}
+
+		/* many requests terminate without a short packet */
+		} else {
+			if (req->req.length == req->req.actual
+					&& !req->req.zero)
+				req->req.status = 0;
+			if (urb->transfer_buffer_length == urb->actual_length
+					&& !(urb->transfer_flags
+						& URB_ZERO_PACKET))
+				*status = 0;
+		}
+
+		/* device side completion --> continuable */
+		if (req->req.status != -EINPROGRESS) {
+			list_del_init(&req->queue);
+
+			spin_unlock(&dum->lock);
+			req->req.complete(&ep->ep, &req->req);
+			spin_lock(&dum->lock);
+
+			/* requests might have been unlinked... */
+			rescan = 1;
+		}
+
+		/* host side completion --> terminate */
+		if (*status != -EINPROGRESS)
+			break;
+
+		/* rescan to continue with any other queued i/o */
+		if (rescan)
+			goto top;
+	}
+	return limit;
+}
+
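+/*
+ * Estimate how many bytes a periodic endpoint may move per frame.  For
+ * high speed, bits 12:11 of wMaxPacketSize encode up to two additional
+ * transactions per microframe (and there are 8 microframes per frame);
+ * the SuperSpeed limits come from the USB 3.0 sections cited below.
+ */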
+static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep)
+{
+	int	limit = ep->ep.maxpacket;
+
+	if (dum->gadget.speed == USB_SPEED_HIGH) {
+		int	tmp;
+
+		/* high bandwidth mode */
+		tmp = usb_endpoint_maxp(ep->desc);
+		tmp = (tmp >> 11) & 0x03;
+		tmp *= 8 /* applies to entire frame */;
+		limit += limit * tmp;
+	}
+	if (dum->gadget.speed == USB_SPEED_SUPER) {
+		switch (usb_endpoint_type(ep->desc)) {
+		case USB_ENDPOINT_XFER_ISOC:
+			/* Sec. 4.4.8.2 USB3.0 Spec */
+			limit = 3 * 16 * 1024 * 8;
+			break;
+		case USB_ENDPOINT_XFER_INT:
+			/* Sec. 4.4.7.2 USB3.0 Spec */
+			limit = 3 * 1024 * 8;
+			break;
+		case USB_ENDPOINT_XFER_BULK:
+		default:
+			break;
+		}
+	}
+	return limit;
+}
+
+#define is_active(dum_hcd)	((dum_hcd->port_status & \
+		(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE | \
+			USB_PORT_STAT_SUSPEND)) \
+		== (USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE))
+
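+/*
+ * Map a host-side endpoint address onto the gadget's endpoint array;
+ * returns NULL if the port is inactive or no matching endpoint is enabled.
+ */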
+static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
+{
+	int		i;
+
+	if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
+			dum->ss_hcd : dum->hs_hcd)))
+		return NULL;
+	if ((address & ~USB_DIR_IN) == 0)
+		return &dum->ep[0];
+	for (i = 1; i < DUMMY_ENDPOINTS; i++) {
+		struct dummy_ep	*ep = &dum->ep[i];
+
+		if (!ep->desc)
+			continue;
+		if (ep->desc->bEndpointAddress == address)
+			return ep;
+	}
+	return NULL;
+}
+
+#undef is_active
+
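+/* shorthand for the standard bmRequestType values handled below */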
+#define Dev_Request	(USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+#define Dev_InRequest	(Dev_Request | USB_DIR_IN)
+#define Intf_Request	(USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
+#define Intf_InRequest	(Intf_Request | USB_DIR_IN)
+#define Ep_Request	(USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
+#define Ep_InRequest	(Ep_Request | USB_DIR_IN)
+
+
+/**
+ * handle_control_request() - handles all control transfers
+ * @dum_hcd: pointer to the dummy host controller (the_controller)
+ * @urb: the urb request to handle
+ * @setup: pointer to the setup data for a USB device control
+ *	 request
+ * @status: pointer to request handling status
+ *
+ * Return 0 - if the request was handled
+ *	  1 - if the request wasn't handled
+ *	  error code on error
+ */
+static int handle_control_request(struct dummy_hcd *dum_hcd, struct urb *urb,
+				  struct usb_ctrlrequest *setup,
+				  int *status)
+{
+	struct dummy_ep		*ep2;
+	struct dummy		*dum = dum_hcd->dum;
+	int			ret_val = 1;
+	unsigned	w_index;
+	unsigned	w_value;
+
+	w_index = le16_to_cpu(setup->wIndex);
+	w_value = le16_to_cpu(setup->wValue);
+	switch (setup->bRequest) {
+	case USB_REQ_SET_ADDRESS:
+		if (setup->bRequestType != Dev_Request)
+			break;
+		dum->address = w_value;
+		*status = 0;
+		dev_dbg(udc_dev(dum), "set_address = %d\n",
+				w_value);
+		ret_val = 0;
+		break;
+	case USB_REQ_SET_FEATURE:
+		if (setup->bRequestType == Dev_Request) {
+			ret_val = 0;
+			switch (w_value) {
+			case USB_DEVICE_REMOTE_WAKEUP:
+				break;
+			case USB_DEVICE_B_HNP_ENABLE:
+				dum->gadget.b_hnp_enable = 1;
+				break;
+			case USB_DEVICE_A_HNP_SUPPORT:
+				dum->gadget.a_hnp_support = 1;
+				break;
+			case USB_DEVICE_A_ALT_HNP_SUPPORT:
+				dum->gadget.a_alt_hnp_support = 1;
+				break;
+			case USB_DEVICE_U1_ENABLE:
+				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+				    HCD_USB3)
+					w_value = USB_DEV_STAT_U1_ENABLED;
+				else
+					ret_val = -EOPNOTSUPP;
+				break;
+			case USB_DEVICE_U2_ENABLE:
+				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+				    HCD_USB3)
+					w_value = USB_DEV_STAT_U2_ENABLED;
+				else
+					ret_val = -EOPNOTSUPP;
+				break;
+			case USB_DEVICE_LTM_ENABLE:
+				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+				    HCD_USB3)
+					w_value = USB_DEV_STAT_LTM_ENABLED;
+				else
+					ret_val = -EOPNOTSUPP;
+				break;
+			default:
+				ret_val = -EOPNOTSUPP;
+			}
+			if (ret_val == 0) {
+				dum->devstatus |= (1 << w_value);
+				*status = 0;
+			}
+		} else if (setup->bRequestType == Ep_Request) {
+			/* endpoint halt */
+			ep2 = find_endpoint(dum, w_index);
+			if (!ep2 || ep2->ep.name == ep0name) {
+				ret_val = -EOPNOTSUPP;
+				break;
+			}
+			ep2->halted = 1;
+			ret_val = 0;
+			*status = 0;
+		}
+		break;
+	case USB_REQ_CLEAR_FEATURE:
+		if (setup->bRequestType == Dev_Request) {
+			ret_val = 0;
+			switch (w_value) {
+			case USB_DEVICE_REMOTE_WAKEUP:
+				w_value = USB_DEVICE_REMOTE_WAKEUP;
+				break;
+			case USB_DEVICE_U1_ENABLE:
+				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+				    HCD_USB3)
+					w_value = USB_DEV_STAT_U1_ENABLED;
+				else
+					ret_val = -EOPNOTSUPP;
+				break;
+			case USB_DEVICE_U2_ENABLE:
+				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+				    HCD_USB3)
+					w_value = USB_DEV_STAT_U2_ENABLED;
+				else
+					ret_val = -EOPNOTSUPP;
+				break;
+			case USB_DEVICE_LTM_ENABLE:
+				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+				    HCD_USB3)
+					w_value = USB_DEV_STAT_LTM_ENABLED;
+				else
+					ret_val = -EOPNOTSUPP;
+				break;
+			default:
+				ret_val = -EOPNOTSUPP;
+				break;
+			}
+			if (ret_val == 0) {
+				dum->devstatus &= ~(1 << w_value);
+				*status = 0;
+			}
+		} else if (setup->bRequestType == Ep_Request) {
+			/* endpoint halt */
+			ep2 = find_endpoint(dum, w_index);
+			if (!ep2) {
+				ret_val = -EOPNOTSUPP;
+				break;
+			}
+			if (!ep2->wedged)
+				ep2->halted = 0;
+			ret_val = 0;
+			*status = 0;
+		}
+		break;
+	case USB_REQ_GET_STATUS:
+		if (setup->bRequestType == Dev_InRequest
+				|| setup->bRequestType == Intf_InRequest
+				|| setup->bRequestType == Ep_InRequest) {
+			char *buf;
+			/*
+			 * device: remote wakeup, self-powered
+			 * interface: nothing
+			 * endpoint: halt
+			 */
+			buf = (char *)urb->transfer_buffer;
+			if (urb->transfer_buffer_length > 0) {
+				if (setup->bRequestType == Ep_InRequest) {
+					ep2 = find_endpoint(dum, w_index);
+					if (!ep2) {
+						ret_val = -EOPNOTSUPP;
+						break;
+					}
+					buf[0] = ep2->halted;
+				} else if (setup->bRequestType ==
+					   Dev_InRequest) {
+					buf[0] = (u8)dum->devstatus;
+				} else
+					buf[0] = 0;
+			}
+			if (urb->transfer_buffer_length > 1)
+				buf[1] = 0;
+			urb->actual_length = min_t(u32, 2,
+				urb->transfer_buffer_length);
+			ret_val = 0;
+			*status = 0;
+		}
+		break;
+	}
+	return ret_val;
+}
+
+/* drive both sides of the transfers; looks like irq handlers to
+ * both drivers except the callbacks aren't in_irq().
+ */
+static void dummy_timer(unsigned long _dum_hcd)
+{
+	struct dummy_hcd	*dum_hcd = (struct dummy_hcd *) _dum_hcd;
+	struct dummy		*dum = dum_hcd->dum;
+	struct urbp		*urbp, *tmp;
+	unsigned long		flags;
+	int			limit, total;
+	int			i;
+
+	/* simplistic model for one frame's bandwidth */
+	switch (dum->gadget.speed) {
+	case USB_SPEED_LOW:
+		total = 8/*bytes*/ * 12/*packets*/;
+		break;
+	case USB_SPEED_FULL:
+		total = 64/*bytes*/ * 19/*packets*/;
+		break;
+	case USB_SPEED_HIGH:
+		total = 512/*bytes*/ * 13/*packets*/ * 8/*uframes*/;
+		break;
+	case USB_SPEED_SUPER:
+		/* Bus speed is 500000 bytes/ms, so use a little less */
+		total = 490000;
+		break;
+	default:
+		dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
+		return;
+	}
+
+	/* FIXME if HZ != 1000 this will probably misbehave ... */
+
+	/* look at each urb queued by the host side driver */
+	spin_lock_irqsave(&dum->lock, flags);
+
+	if (!dum_hcd->udev) {
+		dev_err(dummy_dev(dum_hcd),
+				"timer fired with no URBs pending?\n");
+		spin_unlock_irqrestore(&dum->lock, flags);
+		return;
+	}
+
+	for (i = 0; i < DUMMY_ENDPOINTS; i++) {
+		if (!ep_name[i])
+			break;
+		dum->ep[i].already_seen = 0;
+	}
+
+restart:
+	list_for_each_entry_safe(urbp, tmp, &dum_hcd->urbp_list, urbp_list) {
+		struct urb		*urb;
+		struct dummy_request	*req;
+		u8			address;
+		struct dummy_ep		*ep = NULL;
+		int			type;
+		int			status = -EINPROGRESS;
+
+		urb = urbp->urb;
+		if (urb->unlinked)
+			goto return_urb;
+		else if (dum_hcd->rh_state != DUMMY_RH_RUNNING)
+			continue;
+		type = usb_pipetype(urb->pipe);
+
+		/* used up this frame's non-periodic bandwidth?
+		 * FIXME there's infinite bandwidth for control and
+		 * periodic transfers ... unrealistic.
+		 */
+		if (total <= 0 && type == PIPE_BULK)
+			continue;
+
+		/* find the gadget's ep for this request (if configured) */
+		address = usb_pipeendpoint (urb->pipe);
+		if (usb_pipein(urb->pipe))
+			address |= USB_DIR_IN;
+		ep = find_endpoint(dum, address);
+		if (!ep) {
+			/* set_configuration() disagreement */
+			dev_dbg(dummy_dev(dum_hcd),
+				"no ep configured for urb %p\n",
+				urb);
+			status = -EPROTO;
+			goto return_urb;
+		}
+
+		if (ep->already_seen)
+			continue;
+		ep->already_seen = 1;
+		if (ep == &dum->ep[0] && urb->error_count) {
+			ep->setup_stage = 1;	/* a new urb */
+			urb->error_count = 0;
+		}
+		if (ep->halted && !ep->setup_stage) {
+			/* NOTE: must not be iso! */
+			dev_dbg(dummy_dev(dum_hcd), "ep %s halted, urb %p\n",
+					ep->ep.name, urb);
+			status = -EPIPE;
+			goto return_urb;
+		}
+		/* FIXME make sure both ends agree on maxpacket */
+
+		/* handle control requests */
+		if (ep == &dum->ep[0] && ep->setup_stage) {
+			struct usb_ctrlrequest		setup;
+			int				value = 1;
+
+			setup = *(struct usb_ctrlrequest *) urb->setup_packet;
+			/* paranoia, in case of stale queued data */
+			list_for_each_entry(req, &ep->queue, queue) {
+				list_del_init(&req->queue);
+				req->req.status = -EOVERFLOW;
+				dev_dbg(udc_dev(dum), "stale req = %p\n",
+						req);
+
+				spin_unlock(&dum->lock);
+				req->req.complete(&ep->ep, &req->req);
+				spin_lock(&dum->lock);
+				ep->already_seen = 0;
+				goto restart;
+			}
+
+			/* gadget driver never sees set_address or operations
+			 * on standard feature flags.  some hardware doesn't
+			 * even expose them.
+			 */
+			ep->last_io = jiffies;
+			ep->setup_stage = 0;
+			ep->halted = 0;
+
+			value = handle_control_request(dum_hcd, urb, &setup,
+						       &status);
+
+			/* gadget driver handles all other requests.  block
+			 * until setup() returns; no reentrancy issues etc.
+			 */
+			if (value > 0) {
+				spin_unlock(&dum->lock);
+				value = dum->driver->setup(&dum->gadget,
+						&setup);
+				spin_lock(&dum->lock);
+
+				if (value >= 0) {
+					/* no delays (max 64KB data stage) */
+					limit = 64*1024;
+					goto treat_control_like_bulk;
+				}
+				/* error, see below */
+			}
+
+			if (value < 0) {
+				if (value != -EOPNOTSUPP)
+					dev_dbg(udc_dev(dum),
+						"setup --> %d\n",
+						value);
+				status = -EPIPE;
+				urb->actual_length = 0;
+			}
+
+			goto return_urb;
+		}
+
+		/* non-control requests */
+		limit = total;
+		switch (usb_pipetype(urb->pipe)) {
+		case PIPE_ISOCHRONOUS:
+			/* FIXME is it urb->interval since the last xfer?
+			 * use urb->iso_frame_desc[i].
+			 * complete whether or not ep has requests queued.
+			 * report random errors, to debug drivers.
+			 */
+			limit = max(limit, periodic_bytes(dum, ep));
+			status = -ENOSYS;
+			break;
+
+		case PIPE_INTERRUPT:
+			/* FIXME is it urb->interval since the last xfer?
+			 * this almost certainly polls too fast.
+			 */
+			limit = max(limit, periodic_bytes(dum, ep));
+			/* FALLTHROUGH */
+
+		default:
+treat_control_like_bulk:
+			ep->last_io = jiffies;
+			total = transfer(dum_hcd, urb, ep, limit, &status);
+			break;
+		}
+
+		/* incomplete transfer? */
+		if (status == -EINPROGRESS)
+			continue;
+
+return_urb:
+		list_del(&urbp->urbp_list);
+		kfree(urbp);
+		if (ep)
+			ep->already_seen = ep->setup_stage = 0;
+
+		usb_hcd_unlink_urb_from_ep(dummy_hcd_to_hcd(dum_hcd), urb);
+		spin_unlock(&dum->lock);
+		usb_hcd_giveback_urb(dummy_hcd_to_hcd(dum_hcd), urb, status);
+		spin_lock(&dum->lock);
+
+		goto restart;
+	}
+
+	if (list_empty(&dum_hcd->urbp_list)) {
+		usb_put_dev(dum_hcd->udev);
+		dum_hcd->udev = NULL;
+	} else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
+		/* want a 1 msec delay here */
+		mod_timer(&dum_hcd->timer, jiffies + msecs_to_jiffies(1));
+	}
+
+	spin_unlock_irqrestore(&dum->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define PORT_C_MASK \
+	((USB_PORT_STAT_C_CONNECTION \
+	| USB_PORT_STAT_C_ENABLE \
+	| USB_PORT_STAT_C_SUSPEND \
+	| USB_PORT_STAT_C_OVERCURRENT \
+	| USB_PORT_STAT_C_RESET) << 16)
+
+static int dummy_hub_status(struct usb_hcd *hcd, char *buf)
+{
+	struct dummy_hcd	*dum_hcd;
+	unsigned long		flags;
+	int			retval = 0;
+
+	dum_hcd = hcd_to_dummy_hcd(hcd);
+
+	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+	if (!HCD_HW_ACCESSIBLE(hcd))
+		goto done;
+
+	if (dum_hcd->resuming && time_after_eq(jiffies, dum_hcd->re_timeout)) {
+		dum_hcd->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
+		dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
+		set_link_state(dum_hcd);
+	}
+
+	if ((dum_hcd->port_status & PORT_C_MASK) != 0) {
+		*buf = (1 << 1);
+		dev_dbg(dummy_dev(dum_hcd), "port status 0x%08x has changes\n",
+				dum_hcd->port_status);
+		retval = 1;
+		if (dum_hcd->rh_state == DUMMY_RH_SUSPENDED)
+			usb_hcd_resume_root_hub(hcd);
+	}
+done:
+	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+	return retval;
+}
+
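+/*
+ * Build a minimal one-port root-hub descriptor.  Descriptor type 0x2a is
+ * the SuperSpeed hub descriptor; the USB 2.0 variant below uses 0x29.
+ */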
+static inline void
+ss_hub_descriptor(struct usb_hub_descriptor *desc)
+{
+	memset(desc, 0, sizeof *desc);
+	desc->bDescriptorType = 0x2a;
+	desc->bDescLength = 12;
+	desc->wHubCharacteristics = cpu_to_le16(0x0001);
+	desc->bNbrPorts = 1;
+	desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
+	desc->u.ss.DeviceRemovable = 0xffff;
+}
+
+static inline void hub_descriptor(struct usb_hub_descriptor *desc)
+{
+	memset(desc, 0, sizeof *desc);
+	desc->bDescriptorType = 0x29;
+	desc->bDescLength = 9;
+	desc->wHubCharacteristics = cpu_to_le16(0x0001);
+	desc->bNbrPorts = 1;
+	desc->u.hs.DeviceRemovable[0] = 0xff;
+	desc->u.hs.DeviceRemovable[1] = 0xff;
+}
+
+static int dummy_hub_control(
+	struct usb_hcd	*hcd,
+	u16		typeReq,
+	u16		wValue,
+	u16		wIndex,
+	char		*buf,
+	u16		wLength
+) {
+	struct dummy_hcd *dum_hcd;
+	int		retval = 0;
+	unsigned long	flags;
+
+	if (!HCD_HW_ACCESSIBLE(hcd))
+		return -ETIMEDOUT;
+
+	dum_hcd = hcd_to_dummy_hcd(hcd);
+
+	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+	switch (typeReq) {
+	case ClearHubFeature:
+		break;
+	case ClearPortFeature:
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			if (hcd->speed == HCD_USB3) {
+				dev_dbg(dummy_dev(dum_hcd),
+					 "USB_PORT_FEAT_SUSPEND req not "
+					 "supported for USB 3.0 roothub\n");
+				goto error;
+			}
+			if (dum_hcd->port_status & USB_PORT_STAT_SUSPEND) {
+				/* 20msec resume signaling */
+				dum_hcd->resuming = 1;
+				dum_hcd->re_timeout = jiffies +
+						msecs_to_jiffies(20);
+			}
+			break;
+		case USB_PORT_FEAT_POWER:
+			if (hcd->speed == HCD_USB3) {
+				if (dum_hcd->port_status & USB_PORT_STAT_POWER)
+					dev_dbg(dummy_dev(dum_hcd),
+						"power-off\n");
+			} else
+				if (dum_hcd->port_status &
+							USB_SS_PORT_STAT_POWER)
+					dev_dbg(dummy_dev(dum_hcd),
+						"power-off\n");
+			/* FALLS THROUGH */
+		default:
+			dum_hcd->port_status &= ~(1 << wValue);
+			set_link_state(dum_hcd);
+		}
+		break;
+	case GetHubDescriptor:
+		if (hcd->speed == HCD_USB3 &&
+				(wLength < USB_DT_SS_HUB_SIZE ||
+				 wValue != (USB_DT_SS_HUB << 8))) {
+			dev_dbg(dummy_dev(dum_hcd),
+				"Wrong hub descriptor type for "
+				"USB 3.0 roothub.\n");
+			goto error;
+		}
+		if (hcd->speed == HCD_USB3)
+			ss_hub_descriptor((struct usb_hub_descriptor *) buf);
+		else
+			hub_descriptor((struct usb_hub_descriptor *) buf);
+		break;
+	case GetHubStatus:
+		*(__le32 *) buf = cpu_to_le32(0);
+		break;
+	case GetPortStatus:
+		if (wIndex != 1)
+			retval = -EPIPE;
+
+		/* whoever resets or resumes must GetPortStatus to
+		 * complete it!!
+		 */
+		if (dum_hcd->resuming &&
+				time_after_eq(jiffies, dum_hcd->re_timeout)) {
+			dum_hcd->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
+			dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
+		}
+		if ((dum_hcd->port_status & USB_PORT_STAT_RESET) != 0 &&
+				time_after_eq(jiffies, dum_hcd->re_timeout)) {
+			dum_hcd->port_status |= (USB_PORT_STAT_C_RESET << 16);
+			dum_hcd->port_status &= ~USB_PORT_STAT_RESET;
+			if (dum_hcd->dum->pullup) {
+				dum_hcd->port_status |= USB_PORT_STAT_ENABLE;
+
+				if (hcd->speed < HCD_USB3) {
+					switch (dum_hcd->dum->gadget.speed) {
+					case USB_SPEED_HIGH:
+						dum_hcd->port_status |=
+						      USB_PORT_STAT_HIGH_SPEED;
+						break;
+					case USB_SPEED_LOW:
+						dum_hcd->dum->gadget.ep0->
+							maxpacket = 8;
+						dum_hcd->port_status |=
+							USB_PORT_STAT_LOW_SPEED;
+						break;
+					default:
+						dum_hcd->dum->gadget.speed =
+							USB_SPEED_FULL;
+						break;
+					}
+				}
+			}
+		}
+		set_link_state(dum_hcd);
+		((__le16 *) buf)[0] = cpu_to_le16(dum_hcd->port_status);
+		((__le16 *) buf)[1] = cpu_to_le16(dum_hcd->port_status >> 16);
+		break;
+	case SetHubFeature:
+		retval = -EPIPE;
+		break;
+	case SetPortFeature:
+		switch (wValue) {
+		case USB_PORT_FEAT_LINK_STATE:
+			if (hcd->speed != HCD_USB3) {
+				dev_dbg(dummy_dev(dum_hcd),
+					 "USB_PORT_FEAT_LINK_STATE req not "
+					 "supported for USB 2.0 roothub\n");
+				goto error;
+			}
+			/*
+			 * Since this is dummy we don't have an actual link so
+			 * there is nothing to do for the SET_LINK_STATE cmd
+			 */
+			break;
+		case USB_PORT_FEAT_U1_TIMEOUT:
+		case USB_PORT_FEAT_U2_TIMEOUT:
+			/* TODO: add suspend/resume support! */
+			if (hcd->speed != HCD_USB3) {
+				dev_dbg(dummy_dev(dum_hcd),
+					 "USB_PORT_FEAT_U1/2_TIMEOUT req not "
+					 "supported for USB 2.0 roothub\n");
+				goto error;
+			}
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			/* Applicable only for USB2.0 hub */
+			if (hcd->speed == HCD_USB3) {
+				dev_dbg(dummy_dev(dum_hcd),
+					 "USB_PORT_FEAT_SUSPEND req not "
+					 "supported for USB 3.0 roothub\n");
+				goto error;
+			}
+			if (dum_hcd->active) {
+				dum_hcd->port_status |= USB_PORT_STAT_SUSPEND;
+
+				/* HNP would happen here; for now we
+				 * assume b_bus_req is always true.
+				 */
+				set_link_state(dum_hcd);
+				if (((1 << USB_DEVICE_B_HNP_ENABLE)
+						& dum_hcd->dum->devstatus) != 0)
+					dev_dbg(dummy_dev(dum_hcd),
+							"no HNP yet!\n");
+			}
+			break;
+		case USB_PORT_FEAT_POWER:
+			if (hcd->speed == HCD_USB3)
+				dum_hcd->port_status |= USB_SS_PORT_STAT_POWER;
+			else
+				dum_hcd->port_status |= USB_PORT_STAT_POWER;
+			set_link_state(dum_hcd);
+			break;
+		case USB_PORT_FEAT_BH_PORT_RESET:
+			/* Applicable only for USB3.0 hub */
+			if (hcd->speed != HCD_USB3) {
+				dev_dbg(dummy_dev(dum_hcd),
+					 "USB_PORT_FEAT_BH_PORT_RESET req not "
+					 "supported for USB 2.0 roothub\n");
+				goto error;
+			}
+			/* FALLS THROUGH */
+		case USB_PORT_FEAT_RESET:
+			/* if it's already enabled, disable */
+			if (hcd->speed == HCD_USB3) {
+				dum_hcd->port_status = 0;
+				dum_hcd->port_status =
+					(USB_SS_PORT_STAT_POWER |
+					 USB_PORT_STAT_CONNECTION |
+					 USB_PORT_STAT_RESET);
+			} else
+				dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
+					| USB_PORT_STAT_LOW_SPEED
+					| USB_PORT_STAT_HIGH_SPEED);
+			/*
+			 * We want to reset device status. All but the
+			 * Self powered feature
+			 */
+			dum_hcd->dum->devstatus &=
+				(1 << USB_DEVICE_SELF_POWERED);
+			/*
+			 * FIXME USB3.0: what is the correct reset signaling
+			 * interval? Is it still 50msec as for HS?
+			 */
+			dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
+			/* FALLS THROUGH */
+		default:
+			if (hcd->speed == HCD_USB3) {
+				if ((dum_hcd->port_status &
+				     USB_SS_PORT_STAT_POWER) != 0) {
+					dum_hcd->port_status |= (1 << wValue);
+					set_link_state(dum_hcd);
+				}
+			} else
+				if ((dum_hcd->port_status &
+				     USB_PORT_STAT_POWER) != 0) {
+					dum_hcd->port_status |= (1 << wValue);
+					set_link_state(dum_hcd);
+				}
+		}
+		break;
+	case GetPortErrorCount:
+		if (hcd->speed != HCD_USB3) {
+			dev_dbg(dummy_dev(dum_hcd),
+				 "GetPortErrorCount req not "
+				 "supported for USB 2.0 roothub\n");
+			goto error;
+		}
+		/* We'll always return 0 since this is a dummy hub */
+		*(__le32 *) buf = cpu_to_le32(0);
+		break;
+	case SetHubDepth:
+		if (hcd->speed != HCD_USB3) {
+			dev_dbg(dummy_dev(dum_hcd),
+				 "SetHubDepth req not supported for "
+				 "USB 2.0 roothub\n");
+			goto error;
+		}
+		break;
+	default:
+		dev_dbg(dummy_dev(dum_hcd),
+			"hub control req%04x v%04x i%04x l%d\n",
+			typeReq, wValue, wIndex, wLength);
+error:
+		/* "protocol stall" on error */
+		retval = -EPIPE;
+	}
+	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+
+	if ((dum_hcd->port_status & PORT_C_MASK) != 0)
+		usb_hcd_poll_rh_status(hcd);
+	return retval;
+}
+
+static int dummy_bus_suspend(struct usb_hcd *hcd)
+{
+	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
+
+	dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
+
+	spin_lock_irq(&dum_hcd->dum->lock);
+	dum_hcd->rh_state = DUMMY_RH_SUSPENDED;
+	set_link_state(dum_hcd);
+	hcd->state = HC_STATE_SUSPENDED;
+	spin_unlock_irq(&dum_hcd->dum->lock);
+	return 0;
+}
+
+static int dummy_bus_resume(struct usb_hcd *hcd)
+{
+	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
+	int rc = 0;
+
+	dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
+
+	spin_lock_irq(&dum_hcd->dum->lock);
+	if (!HCD_HW_ACCESSIBLE(hcd)) {
+		rc = -ESHUTDOWN;
+	} else {
+		dum_hcd->rh_state = DUMMY_RH_RUNNING;
+		set_link_state(dum_hcd);
+		if (!list_empty(&dum_hcd->urbp_list))
+			mod_timer(&dum_hcd->timer, jiffies);
+		hcd->state = HC_STATE_RUNNING;
+	}
+	spin_unlock_irq(&dum_hcd->dum->lock);
+	return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
+{
+	int ep = usb_pipeendpoint(urb->pipe);
+
+	return snprintf(buf, size,
+		"urb/%p %s ep%d%s%s len %d/%d\n",
+		urb,
+		({ char *s;
+		switch (urb->dev->speed) {
+		case USB_SPEED_LOW:
+			s = "ls";
+			break;
+		case USB_SPEED_FULL:
+			s = "fs";
+			break;
+		case USB_SPEED_HIGH:
+			s = "hs";
+			break;
+		case USB_SPEED_SUPER:
+			s = "ss";
+			break;
+		default:
+			s = "?";
+			break;
+		 }; s; }),
+		ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "",
+		({ char *s; \
+		switch (usb_pipetype(urb->pipe)) { \
+		case PIPE_CONTROL: \
+			s = ""; \
+			break; \
+		case PIPE_BULK: \
+			s = "-bulk"; \
+			break; \
+		case PIPE_INTERRUPT: \
+			s = "-int"; \
+			break; \
+		default: \
+			s = "-iso"; \
+			break; \
+		}; s; }),
+		urb->actual_length, urb->transfer_buffer_length);
+}
+
+static ssize_t show_urbs(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usb_hcd		*hcd = dev_get_drvdata(dev);
+	struct dummy_hcd	*dum_hcd = hcd_to_dummy_hcd(hcd);
+	struct urbp		*urbp;
+	size_t			size = 0;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+	list_for_each_entry(urbp, &dum_hcd->urbp_list, urbp_list) {
+		size_t		temp;
+
+		temp = show_urb(buf, PAGE_SIZE - size, urbp->urb);
+		buf += temp;
+		size += temp;
+	}
+	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+
+	return size;
+}
+static DEVICE_ATTR(urbs, S_IRUGO, show_urbs, NULL);
+
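+/*
+ * Start the secondary (SuperSpeed) HCD.  Note that the sysfs 'urbs' file
+ * is only created for the primary HCD in dummy_start() below.
+ */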
+static int dummy_start_ss(struct dummy_hcd *dum_hcd)
+{
+	init_timer(&dum_hcd->timer);
+	dum_hcd->timer.function = dummy_timer;
+	dum_hcd->timer.data = (unsigned long)dum_hcd;
+	dum_hcd->rh_state = DUMMY_RH_RUNNING;
+	dum_hcd->stream_en_ep = 0;
+	INIT_LIST_HEAD(&dum_hcd->urbp_list);
+	dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET;
+	dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING;
+	dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1;
+#ifdef CONFIG_USB_OTG
+	dummy_hcd_to_hcd(dum_hcd)->self.otg_port = 1;
+#endif
+	return 0;
+
+	/* FIXME 'urbs' should be a per-device thing, maybe in usbcore */
+	return device_create_file(dummy_dev(dum_hcd), &dev_attr_urbs);
+}
+
+static int dummy_start(struct usb_hcd *hcd)
+{
+	struct dummy_hcd	*dum_hcd = hcd_to_dummy_hcd(hcd);
+
+	/*
+	 * MASTER side init ... we emulate a root hub that'll only ever
+	 * talk to one device (the slave side).  Also appears in sysfs,
+	 * just like more familiar pci-based HCDs.
+	 */
+	if (!usb_hcd_is_primary_hcd(hcd))
+		return dummy_start_ss(dum_hcd);
+
+	spin_lock_init(&dum_hcd->dum->lock);
+	init_timer(&dum_hcd->timer);
+	dum_hcd->timer.function = dummy_timer;
+	dum_hcd->timer.data = (unsigned long)dum_hcd;
+	dum_hcd->rh_state = DUMMY_RH_RUNNING;
+
+	INIT_LIST_HEAD(&dum_hcd->urbp_list);
+
+	hcd->power_budget = POWER_BUDGET;
+	hcd->state = HC_STATE_RUNNING;
+	hcd->uses_new_polling = 1;
+
+#ifdef CONFIG_USB_OTG
+	hcd->self.otg_port = 1;
+#endif
+
+	/* FIXME 'urbs' should be a per-device thing, maybe in usbcore */
+	return device_create_file(dummy_dev(dum_hcd), &dev_attr_urbs);
+}
+
+static void dummy_stop(struct usb_hcd *hcd)
+{
+	struct dummy		*dum;
+
+	dum = hcd_to_dummy_hcd(hcd)->dum;
+	device_remove_file(dummy_dev(hcd_to_dummy_hcd(hcd)), &dev_attr_urbs);
+	usb_gadget_unregister_driver(dum->driver);
+	dev_info(dummy_dev(hcd_to_dummy_hcd(hcd)), "stopped\n");
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int dummy_h_get_frame(struct usb_hcd *hcd)
+{
+	return dummy_g_get_frame(NULL);
+}
+
+static int dummy_setup(struct usb_hcd *hcd)
+{
+	hcd->self.sg_tablesize = ~0;
+	if (usb_hcd_is_primary_hcd(hcd)) {
+		the_controller.hs_hcd = hcd_to_dummy_hcd(hcd);
+		the_controller.hs_hcd->dum = &the_controller;
+		/*
+		 * Mark the first roothub as being USB 2.0.
+		 * The USB 3.0 roothub will be registered later by
+		 * dummy_hcd_probe()
+		 */
+		hcd->speed = HCD_USB2;
+		hcd->self.root_hub->speed = USB_SPEED_HIGH;
+	} else {
+		the_controller.ss_hcd = hcd_to_dummy_hcd(hcd);
+		the_controller.ss_hcd->dum = &the_controller;
+		hcd->speed = HCD_USB3;
+		hcd->self.root_hub->speed = USB_SPEED_SUPER;
+	}
+	return 0;
+}
+
+/* Change a group of bulk endpoints to support multiple stream IDs */
+static int dummy_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+	struct usb_host_endpoint **eps, unsigned int num_eps,
+	unsigned int num_streams, gfp_t mem_flags)
+{
+	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
+	unsigned long flags;
+	int max_stream;
+	int ret_streams = num_streams;
+	unsigned int index;
+	unsigned int i;
+
+	if (!num_eps)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+	for (i = 0; i < num_eps; i++) {
+		index = dummy_get_ep_idx(&eps[i]->desc);
+		if ((1 << index) & dum_hcd->stream_en_ep) {
+			ret_streams = -EINVAL;
+			goto out;
+		}
+		max_stream = usb_ss_max_streams(&eps[i]->ss_ep_comp);
+		if (!max_stream) {
+			ret_streams = -EINVAL;
+			goto out;
+		}
+		if (max_stream < ret_streams) {
+			dev_dbg(dummy_dev(dum_hcd), "Ep 0x%x only supports %u "
+					"stream IDs.\n",
+					eps[i]->desc.bEndpointAddress,
+					max_stream);
+			ret_streams = max_stream;
+		}
+	}
+
+	for (i = 0; i < num_eps; i++) {
+		index = dummy_get_ep_idx(&eps[i]->desc);
+		dum_hcd->stream_en_ep |= 1 << index;
+		set_max_streams_for_pipe(dum_hcd,
+				usb_endpoint_num(&eps[i]->desc), ret_streams);
+	}
+out:
+	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+	return ret_streams;
+}
+
+/* Reverts a group of bulk endpoints back to not using stream IDs. */
+static int dummy_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+	struct usb_host_endpoint **eps, unsigned int num_eps,
+	gfp_t mem_flags)
+{
+	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
+	unsigned long flags;
+	int ret;
+	unsigned int index;
+	unsigned int i;
+
+	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+	for (i = 0; i < num_eps; i++) {
+		index = dummy_get_ep_idx(&eps[i]->desc);
+		if (!((1 << index) & dum_hcd->stream_en_ep)) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	for (i = 0; i < num_eps; i++) {
+		index = dummy_get_ep_idx(&eps[i]->desc);
+		dum_hcd->stream_en_ep &= ~(1 << index);
+		set_max_streams_for_pipe(dum_hcd,
+				usb_endpoint_num(&eps[i]->desc), 0);
+	}
+	ret = 0;
+out:
+	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+	return ret;
+}
+
+static struct hc_driver dummy_hcd = {
+	.description =		(char *) driver_name,
+	.product_desc =		"Dummy host controller",
+	.hcd_priv_size =	sizeof(struct dummy_hcd),
+
+	.flags =		HCD_USB3 | HCD_SHARED,
+
+	.reset =		dummy_setup,
+	.start =		dummy_start,
+	.stop =			dummy_stop,
+
+	.urb_enqueue =		dummy_urb_enqueue,
+	.urb_dequeue =		dummy_urb_dequeue,
+
+	.get_frame_number =	dummy_h_get_frame,
+
+	.hub_status_data =	dummy_hub_status,
+	.hub_control =		dummy_hub_control,
+	.bus_suspend =		dummy_bus_suspend,
+	.bus_resume =		dummy_bus_resume,
+
+	.alloc_streams =	dummy_alloc_streams,
+	.free_streams =		dummy_free_streams,
+};
+
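+/*
+ * Platform glue: create the high-speed HCD and, when the is_super_speed
+ * module option is set, a shared SuperSpeed HCD on top of it.
+ */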
+static int dummy_hcd_probe(struct platform_device *pdev)
+{
+	struct usb_hcd		*hs_hcd;
+	struct usb_hcd		*ss_hcd;
+	int			retval;
+
+	dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
+
+	if (!mod_data.is_super_speed)
+		dummy_hcd.flags = HCD_USB2;
+	hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
+	if (!hs_hcd)
+		return -ENOMEM;
+	hs_hcd->has_tt = 1;
+
+	retval = usb_add_hcd(hs_hcd, 0, 0);
+	if (retval)
+		goto put_usb2_hcd;
+
+	if (mod_data.is_super_speed) {
+		ss_hcd = usb_create_shared_hcd(&dummy_hcd, &pdev->dev,
+					dev_name(&pdev->dev), hs_hcd);
+		if (!ss_hcd) {
+			retval = -ENOMEM;
+			goto dealloc_usb2_hcd;
+		}
+
+		retval = usb_add_hcd(ss_hcd, 0, 0);
+		if (retval)
+			goto put_usb3_hcd;
+	}
+	return 0;
+
+put_usb3_hcd:
+	usb_put_hcd(ss_hcd);
+dealloc_usb2_hcd:
+	usb_remove_hcd(hs_hcd);
+put_usb2_hcd:
+	usb_put_hcd(hs_hcd);
+	the_controller.hs_hcd = the_controller.ss_hcd = NULL;
+	return retval;
+}
+
+static int dummy_hcd_remove(struct platform_device *pdev)
+{
+	struct dummy		*dum;
+
+	dum = hcd_to_dummy_hcd(platform_get_drvdata(pdev))->dum;
+
+	if (dum->ss_hcd) {
+		usb_remove_hcd(dummy_hcd_to_hcd(dum->ss_hcd));
+		usb_put_hcd(dummy_hcd_to_hcd(dum->ss_hcd));
+	}
+
+	usb_remove_hcd(dummy_hcd_to_hcd(dum->hs_hcd));
+	usb_put_hcd(dummy_hcd_to_hcd(dum->hs_hcd));
+
+	the_controller.hs_hcd = NULL;
+	the_controller.ss_hcd = NULL;
+
+	return 0;
+}
+
+static int dummy_hcd_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct usb_hcd		*hcd;
+	struct dummy_hcd	*dum_hcd;
+	int			rc = 0;
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	hcd = platform_get_drvdata(pdev);
+	dum_hcd = hcd_to_dummy_hcd(hcd);
+	if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
+		dev_warn(&pdev->dev, "Root hub isn't suspended!\n");
+		rc = -EBUSY;
+	} else
+		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	return rc;
+}
+
+static int dummy_hcd_resume(struct platform_device *pdev)
+{
+	struct usb_hcd		*hcd;
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	hcd = platform_get_drvdata(pdev);
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	usb_hcd_poll_rh_status(hcd);
+	return 0;
+}
+
+static struct platform_driver dummy_hcd_driver = {
+	.probe		= dummy_hcd_probe,
+	.remove		= dummy_hcd_remove,
+	.suspend	= dummy_hcd_suspend,
+	.resume		= dummy_hcd_resume,
+	.driver		= {
+		.name	= (char *) driver_name,
+		.owner	= THIS_MODULE,
+	},
+};
+
+/*-------------------------------------------------------------------------*/
+
+static struct platform_device *the_udc_pdev;
+static struct platform_device *the_hcd_pdev;
+
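+/*
+ * Module init: register a platform device/driver pair for the emulated
+ * host controller and another for the emulated UDC, unwinding in reverse
+ * order if any step fails.
+ */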
+static int __init init(void)
+{
+	int	retval = -ENOMEM;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	if (!mod_data.is_high_speed && mod_data.is_super_speed)
+		return -EINVAL;
+
+	the_hcd_pdev = platform_device_alloc(driver_name, -1);
+	if (!the_hcd_pdev)
+		return retval;
+	the_udc_pdev = platform_device_alloc(gadget_name, -1);
+	if (!the_udc_pdev)
+		goto err_alloc_udc;
+
+	retval = platform_driver_register(&dummy_hcd_driver);
+	if (retval < 0)
+		goto err_register_hcd_driver;
+	retval = platform_driver_register(&dummy_udc_driver);
+	if (retval < 0)
+		goto err_register_udc_driver;
+
+	retval = platform_device_add(the_hcd_pdev);
+	if (retval < 0)
+		goto err_add_hcd;
+	if (!the_controller.hs_hcd ||
+	    (!the_controller.ss_hcd && mod_data.is_super_speed)) {
+		/*
+		 * The hcd was added successfully but its probe function failed
+		 * for some reason.
+		 */
+		retval = -EINVAL;
+		goto err_add_udc;
+	}
+	retval = platform_device_add(the_udc_pdev);
+	if (retval < 0)
+		goto err_add_udc;
+	if (!platform_get_drvdata(the_udc_pdev)) {
+		/*
+		 * The udc was added successfully but its probe function failed
+		 * for some reason.
+		 */
+		retval = -EINVAL;
+		goto err_probe_udc;
+	}
+	return retval;
+
+err_probe_udc:
+	platform_device_del(the_udc_pdev);
+err_add_udc:
+	platform_device_del(the_hcd_pdev);
+err_add_hcd:
+	platform_driver_unregister(&dummy_udc_driver);
+err_register_udc_driver:
+	platform_driver_unregister(&dummy_hcd_driver);
+err_register_hcd_driver:
+	platform_device_put(the_udc_pdev);
+err_alloc_udc:
+	platform_device_put(the_hcd_pdev);
+	return retval;
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	platform_device_unregister(the_udc_pdev);
+	platform_device_unregister(the_hcd_pdev);
+	platform_driver_unregister(&dummy_udc_driver);
+	platform_driver_unregister(&dummy_hcd_driver);
+}
+module_exit(cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/dw2_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/dw2_udc.c
new file mode 100644
index 0000000..0fc1e14
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/dw2_udc.c
@@ -0,0 +1,3533 @@
+/* linux/drivers/usb/gadget/s3c-hsotg.c

+ *

+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.

+ *		http://www.samsung.com

+ *

+ * Copyright 2008 Openmoko, Inc.

+ * Copyright 2008 Simtec Electronics

+ *      Ben Dooks <ben@simtec.co.uk>

+ *      http://armlinux.simtec.co.uk/

+ *

+ * S3C USB2.0 High-speed / OtG driver

+ *

+ * This program is free software; you can redistribute it and/or modify

+ * it under the terms of the GNU General Public License version 2 as

+ * published by the Free Software Foundation.

+*/

+

+#include <linux/kernel.h>

+#include <linux/module.h>

+#include <linux/spinlock.h>

+#include <linux/interrupt.h>

+#include <linux/platform_device.h>

+#include <linux/dma-mapping.h>

+#include <linux/debugfs.h>

+#include <linux/seq_file.h>

+#include <linux/delay.h>

+#include <linux/io.h>

+#include <linux/slab.h>

+#include <linux/clk.h>

+

+#include <linux/usb/ch9.h>

+#include <linux/usb/gadget.h>

+

+//#include <mach/map.h>

+

+#include <mach/regs-usb-hsotg-phy.h>

+#include <mach/regs-usb-hsotg.h>

+#include <mach/udc-hs.h>

+

+//#include <mach/regs-sys.h>

+//#include <plat/udc-hs.h>

+//#include <plat/cpu.h>

+

+#ifndef DEBUG

+#define DEBUG

+#endif

+#ifndef CONFIG_DYNAMIC_DEBUG

+#define CONFIG_DYNAMIC_DEBUG

+#endif

+#define DMA_ADDR_INVALID (~((dma_addr_t)0))

+

+/* EP0_MPS_LIMIT

+ *

+ * Unfortunately there seems to be a limit of the amount of data that can

+ * be transferred by IN transactions on EP0. This is either 127 bytes or 3

+ * packets (which practically means 1 packet and 63 bytes of data) when the

+ * MPS is set to 64.

+ *

+ * This means if we are wanting to move >127 bytes of data, we need to

+ * split the transactions up, but just doing one packet at a time does

+ * not work (this may be an implicit DATA0 PID on first packet of the

+ * transaction) and doing 2 packets is outside the controller's limits.

+ *

+ * If we try to lower the MPS size for EP0, then no transfers work properly

+ * for EP0, and the system will fail basic enumeration. As no cause for this

+ * has currently been found, we cannot support any large IN transfers for

+ * EP0.

+ */

+#define EP0_MPS_LIMIT	64

+

+struct s3c_hsotg;

+struct s3c_hsotg_req;

+

+/**

+ * struct s3c_hsotg_ep - driver endpoint definition.

+ * @ep: The gadget layer representation of the endpoint.

+ * @name: The driver generated name for the endpoint.

+ * @queue: Queue of requests for this endpoint.

+ * @parent: Reference back to the parent device structure.

+ * @req: The current request that the endpoint is processing. This is

+ *       used to indicate an request has been loaded onto the endpoint

+ *       and has yet to be completed (maybe due to data move, or simply

+ *	 awaiting an ack from the core all the data has been completed).

+ * @debugfs: File entry for debugfs file for this endpoint.

+ * @lock: State lock to protect contents of endpoint.

+ * @dir_in: Set to true if this endpoint is of the IN direction, which

+ *	    means that it is sending data to the Host.

+ * @index: The index for the endpoint registers.

+ * @name: The name array passed to the USB core.

+ * @halted: Set if the endpoint has been halted.

+ * @periodic: Set if this is a periodic ep, such as Interrupt

+ * @sent_zlp: Set if we've sent a zero-length packet.

+ * @total_data: The total number of data bytes done.

+ * @fifo_size: The size of the FIFO (for periodic IN endpoints)

+ * @fifo_load: The amount of data loaded into the FIFO (periodic IN)

+ * @last_load: The offset of data for the last start of request.

+ * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN

+ *

+ * This is the driver's state for each registered enpoint, allowing it

+ * to keep track of transactions that need doing. Each endpoint has a

+ * lock to protect the state, to try and avoid using an overall lock

+ * for the host controller as much as possible.

+ *

+ * For periodic IN endpoints, we have fifo_size and fifo_load to try

+ * and keep track of the amount of data in the periodic FIFO for each

+ * of these as we don't have a status register that tells us how much

+ * is in each of them. (note, this may actually be useless information

+ * as in shared-fifo mode periodic in acts like a single-frame packet

+ * buffer than a fifo)

+ */

+struct s3c_hsotg_ep {

+	struct usb_ep		ep;

+	struct list_head	queue;

+	struct s3c_hsotg	*parent;

+	struct s3c_hsotg_req	*req;

+	struct dentry		*debugfs;

+

+	spinlock_t		lock;

+

+	unsigned long		total_data;

+	unsigned int		size_loaded;

+	unsigned int		last_load;

+	unsigned int		fifo_load;

+	unsigned short		fifo_size;

+

+	unsigned char		dir_in;

+	unsigned char		index;

+

+	unsigned int		halted:1;

+	unsigned int		periodic:1;

+	unsigned int		sent_zlp:1;

+

+	char			name[10];

+};

+

+#define S3C_HSOTG_EPS	(8+1)	/* limit to 9 for the moment */

+

+/**

+ * struct s3c_hsotg - driver state.

+ * @dev: The parent device supplied to the probe function

+ * @driver: USB gadget driver

+ * @plat: The platform specific configuration data.

+ * @regs: The memory area mapped for accessing registers.

+ * @regs_res: The resource that was allocated when claiming register space.

+ * @irq: The IRQ number we are using

+ * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.

+ * @debug_root: root directrory for debugfs.

+ * @debug_file: main status file for debugfs.

+ * @debug_fifo: FIFO status file for debugfs.

+ * @ep0_reply: Request used for ep0 reply.

+ * @ep0_buff: Buffer for EP0 reply data, if needed.

+ * @ctrl_buff: Buffer for EP0 control requests.

+ * @ctrl_req: Request for EP0 control packets.

+ * @eps: The endpoints being supplied to the gadget framework

+ */

+struct s3c_hsotg {

+	struct device		 *dev;

+	struct usb_gadget_driver *driver;

+	struct s3c_hsotg_plat	 *plat;

+

+	void __iomem		*regs;

+	struct resource		*regs_res;

+	int			irq;

+	struct clk		*clk;

+

+	unsigned int		dedicated_fifos:1;

+

+	struct dentry		*debug_root;

+	struct dentry		*debug_file;

+	struct dentry		*debug_fifo;

+

+	struct usb_request	*ep0_reply;

+	struct usb_request	*ctrl_req;

+	u8			ep0_buff[8];

+	u8			ctrl_buff[8];

+

+	struct usb_gadget	gadget;

+	struct s3c_hsotg_ep	eps[];

+};

+

+/**

+ * struct s3c_hsotg_req - data transfer request

+ * @req: The USB gadget request

+ * @queue: The list of requests for the endpoint this is queued for.

+ * @in_progress: Has already had size/packets written to core

+ * @mapped: DMA buffer for this request has been mapped via dma_map_single().

+ */

+struct s3c_hsotg_req {

+	struct usb_request	req;

+	struct list_head	queue;

+	unsigned char		in_progress;

+	unsigned char		mapped;

+};

+

+/* conversion functions */

+static inline struct s3c_hsotg_req *our_req(struct usb_request *req)

+{

+	return container_of(req, struct s3c_hsotg_req, req);

+}

+

+static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)

+{

+	return container_of(ep, struct s3c_hsotg_ep, ep);

+}

+

+static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget)

+{

+	return container_of(gadget, struct s3c_hsotg, gadget);

+}

+

+static inline void __orr32(void __iomem *ptr, u32 val)

+{

+	writel(readl(ptr) | val, ptr);

+}

+

+static inline void __bic32(void __iomem *ptr, u32 val)

+{

+	writel(readl(ptr) & ~val, ptr);

+}

+

+/* forward decleration of functions */

+static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);

+

+/**

+ * using_dma - return the DMA status of the driver.

+ * @hsotg: The driver state.

+ *

+ * Return true if we're using DMA.

+ *

+ * Currently, we have the DMA support code worked into everywhere

+ * that needs it, but the AMBA DMA implementation in the hardware can

+ * only DMA from 32bit aligned addresses. This means that gadgets such

+ * as the CDC Ethernet cannot work as they often pass packets which are

+ * not 32bit aligned.

+ *

+ * Unfortunately the choice to use DMA or not is global to the controller

+ * and seems to be only settable when the controller is being put through

+ * a core reset. This means we either need to fix the gadgets to take

+ * account of DMA alignment, or add bounce buffers (yuerk).

+ *

+ * Until this issue is sorted out, we always return 'false'.

+ */

+static inline bool using_dma(struct s3c_hsotg *hsotg)

+{

+	return false;	/* support is not complete */

+}

+

+/**

+ * s3c_hsotg_en_gsint - enable one or more of the general interrupt

+ * @hsotg: The device state

+ * @ints: A bitmask of the interrupts to enable

+ */

+static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints)

+{

+	u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);

+	u32 new_gsintmsk;

+

+	new_gsintmsk = gsintmsk | ints;

+

+	if (new_gsintmsk != gsintmsk) {

+		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);

+		writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);

+	}

+}

+

+/**

+ * s3c_hsotg_disable_gsint - disable one or more of the general interrupt

+ * @hsotg: The device state

+ * @ints: A bitmask of the interrupts to enable

+ */

+static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints)

+{

+	u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);

+	u32 new_gsintmsk;

+

+	new_gsintmsk = gsintmsk & ~ints;

+

+	if (new_gsintmsk != gsintmsk)

+		writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);

+}

+

+/**

+ * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq

+ * @hsotg: The device state

+ * @ep: The endpoint index

+ * @dir_in: True if direction is in.

+ * @en: The enable value, true to enable

+ *

+ * Set or clear the mask for an individual endpoint's interrupt

+ * request.

+ */

+static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg,

+				 unsigned int ep, unsigned int dir_in,

+				 unsigned int en)

+{

+	unsigned long flags;

+	u32 bit = 1 << ep;

+	u32 daint;

+

+	if (!dir_in)

+		bit <<= 16;

+

+	local_irq_save(flags);

+	daint = readl(hsotg->regs + S3C_DAINTMSK);

+	if (en)

+		daint |= bit;

+	else

+		daint &= ~bit;

+	writel(daint, hsotg->regs + S3C_DAINTMSK);

+	local_irq_restore(flags);

+}

+

+/**

+ * s3c_hsotg_init_fifo - initialise non-periodic FIFOs

+ * @hsotg: The device instance.

+ */

+static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)

+{

+	unsigned int ep;

+	unsigned int addr;

+	unsigned int size;

+	int timeout;

+	u32 val;

+

+	/* the ryu 2.6.24 release ahs

+	   writel(0x1C0, hsotg->regs + S3C_GRXFSIZ);

+	   writel(S3C_GNPTXFSIZ_NPTxFStAddr(0x200) |

+		S3C_GNPTXFSIZ_NPTxFDep(0x1C0),

+		hsotg->regs + S3C_GNPTXFSIZ);

+	*/

+

+	/* set FIFO sizes to 2048/1024 */

+

+	writel(2048, hsotg->regs + S3C_GRXFSIZ);

+	writel(S3C_GNPTXFSIZ_NPTxFStAddr(2048) |

+	       S3C_GNPTXFSIZ_NPTxFDep(1024),

+	       hsotg->regs + S3C_GNPTXFSIZ);

+

+	/* arange all the rest of the TX FIFOs, as some versions of this

+	 * block have overlapping default addresses. This also ensures

+	 * that if the settings have been changed, then they are set to

+	 * known values. */

+

+	/* start at the end of the GNPTXFSIZ, rounded up */

+	addr = 2048 + 1024;

+	size = 768;

+

+	/* currently we allocate TX FIFOs for all possible endpoints,

+	 * and assume that they are all the same size. */

+

+	for (ep = 1; ep <= 15; ep++) {

+		val = addr;

+		val |= size << S3C_DPTXFSIZn_DPTxFSize_SHIFT;

+		addr += size;

+

+		writel(val, hsotg->regs + S3C_DPTXFSIZn(ep));

+	}

+

+	/* according to p428 of the design guide, we need to ensure that

+	 * all fifos are flushed before continuing */

+

+	writel(S3C_GRSTCTL_TxFNum(0x10) | S3C_GRSTCTL_TxFFlsh |

+	       S3C_GRSTCTL_RxFFlsh, hsotg->regs + S3C_GRSTCTL);

+

+	/* wait until the fifos are both flushed */

+	timeout = 100;

+	while (1) {

+		val = readl(hsotg->regs + S3C_GRSTCTL);

+

+		if ((val & (S3C_GRSTCTL_TxFFlsh | S3C_GRSTCTL_RxFFlsh)) == 0)

+			break;

+

+		if (--timeout == 0) {

+			dev_err(hsotg->dev,

+				"%s: timeout flushing fifos (GRSTCTL=%08x)\n",

+				__func__, val);

+		}

+

+		udelay(1);

+	}

+

+	dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);

+}

+

+/**

+ * @ep: USB endpoint to allocate request for.

+ * @flags: Allocation flags

+ *

+ * Allocate a new USB request structure appropriate for the specified endpoint

+ */

+static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,

+						      gfp_t flags)

+{

+	struct s3c_hsotg_req *req;

+

+	req = kzalloc(sizeof(struct s3c_hsotg_req), flags);

+	if (!req)

+		return NULL;

+

+	INIT_LIST_HEAD(&req->queue);

+

+	req->req.dma = DMA_ADDR_INVALID;

+	return &req->req;

+}

+

+/**

+ * is_ep_periodic - return true if the endpoint is in periodic mode.

+ * @hs_ep: The endpoint to query.

+ *

+ * Returns true if the endpoint is in periodic mode, meaning it is being

+ * used for an Interrupt or ISO transfer.

+ */

+static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)

+{

+	return hs_ep->periodic;

+}

+

+/**

+ * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request

+ * @hsotg: The device state.

+ * @hs_ep: The endpoint for the request

+ * @hs_req: The request being processed.

+ *

+ * This is the reverse of s3c_hsotg_map_dma(), called for the completion

+ * of a request to ensure the buffer is ready for access by the caller.

+*/

+static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,

+				struct s3c_hsotg_ep *hs_ep,

+				struct s3c_hsotg_req *hs_req)

+{

+	struct usb_request *req = &hs_req->req;

+	enum dma_data_direction dir;

+

+	dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

+

+	/* ignore this if we're not moving any data */

+	if (hs_req->req.length == 0)

+		return;

+

+	if (hs_req->mapped) {

+		/* we mapped this, so unmap and remove the dma */

+

+		dma_unmap_single(hsotg->dev, req->dma, req->length, dir);

+

+		req->dma = DMA_ADDR_INVALID;

+		hs_req->mapped = 0;

+	} else {

+		dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);

+	}

+}

+

+/**

+ * s3c_hsotg_write_fifo - write packet Data to the TxFIFO

+ * @hsotg: The controller state.

+ * @hs_ep: The endpoint we're going to write for.

+ * @hs_req: The request to write data for.

+ *

+ * This is called when the TxFIFO has some space in it to hold a new

+ * transmission and we have something to give it. The actual setup of

+ * the data size is done elsewhere, so all we have to do is to actually

+ * write the data.

+ *

+ * The return value is zero if there is more space (or nothing was done)

+ * otherwise -ENOSPC is returned if the FIFO space was used up.

+ *

+ * This routine is only needed for PIO

+*/

+static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,

+				struct s3c_hsotg_ep *hs_ep,

+				struct s3c_hsotg_req *hs_req)

+{

+	bool periodic = is_ep_periodic(hs_ep);

+	u32 gnptxsts = readl(hsotg->regs + S3C_GNPTXSTS);

+	int buf_pos = hs_req->req.actual;

+	int to_write = hs_ep->size_loaded;

+	void *data;

+	int can_write;

+	int pkt_round;

+

+	to_write -= (buf_pos - hs_ep->last_load);

+

+	/* if there's nothing to write, get out early */

+	if (to_write == 0)

+		return 0;

+

+	if (periodic && !hsotg->dedicated_fifos) {

+		u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));

+		int size_left;

+		int size_done;

+

+		/* work out how much data was loaded so we can calculate

+		 * how much data is left in the fifo. */

+

+		size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);

+

+		/* if shared fifo, we cannot write anything until the

+		 * previous data has been completely sent.

+		 */

+		if (hs_ep->fifo_load != 0) {

+			s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);

+			return -ENOSPC;

+		}

+

+		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",

+			__func__, size_left,

+			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);

+

+		/* how much of the data has moved */

+		size_done = hs_ep->size_loaded - size_left;

+

+		/* how much data is left in the fifo */

+		can_write = hs_ep->fifo_load - size_done;

+		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",

+			__func__, can_write);

+

+		can_write = hs_ep->fifo_size - can_write;

+		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",

+			__func__, can_write);

+

+		if (can_write <= 0) {

+			s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);

+			return -ENOSPC;

+		}

+	} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {

+		can_write = readl(hsotg->regs + S3C_DTXFSTS(hs_ep->index));

+

+		can_write &= 0xffff;

+		can_write *= 4;

+	} else {

+		if (S3C_GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) {

+			dev_dbg(hsotg->dev,

+				"%s: no queue slots available (0x%08x)\n",

+				__func__, gnptxsts);

+

+			s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);

+			return -ENOSPC;

+		}

+

+		can_write = S3C_GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts);

+		can_write *= 4;	/* fifo size is in 32bit quantities. */

+	}

+

+	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n",

+		 __func__, gnptxsts, can_write, to_write, hs_ep->ep.maxpacket);

+

+	/* limit to 512 bytes of data, it seems at least on the non-periodic

+	 * FIFO, requests of >512 cause the endpoint to get stuck with a

+	 * fragment of the end of the transfer in it.

+	 */

+	if (can_write > 512)

+		can_write = 512;

+

+	/* limit the write to one max-packet size worth of data, but allow

+	 * the transfer to return that it did not run out of fifo space

+	 * doing it. */

+	if (to_write > hs_ep->ep.maxpacket) {

+		to_write = hs_ep->ep.maxpacket;

+

+		s3c_hsotg_en_gsint(hsotg,

+				   periodic ? S3C_GINTSTS_PTxFEmp :

+				   S3C_GINTSTS_NPTxFEmp);

+	}

+

+	/* see if we can write data */

+

+	if (to_write > can_write) {

+		to_write = can_write;

+		pkt_round = to_write % hs_ep->ep.maxpacket;

+

+		/* Not sure, but we probably shouldn't be writing partial

+		 * packets into the FIFO, so round the write down to an

+		 * exact number of packets.

+		 *

+		 * Note, we do not currently check to see if we can ever

+		 * write a full packet or not to the FIFO.

+		 */

+

+		if (pkt_round)

+			to_write -= pkt_round;

+

+		/* enable correct FIFO interrupt to alert us when there

+		 * is more room left. */

+

+		s3c_hsotg_en_gsint(hsotg,

+				   periodic ? S3C_GINTSTS_PTxFEmp :

+				   S3C_GINTSTS_NPTxFEmp);

+	}

+

+	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",

+		 to_write, hs_req->req.length, can_write, buf_pos);

+

+	if (to_write <= 0)

+		return -ENOSPC;

+

+	hs_req->req.actual = buf_pos + to_write;

+	hs_ep->total_data += to_write;

+

+	if (periodic)

+		hs_ep->fifo_load += to_write;

+

+	to_write = DIV_ROUND_UP(to_write, 4);

+	data = hs_req->req.buf + buf_pos;

+

+	writesl(hsotg->regs + S3C_EPFIFO(hs_ep->index), data, to_write);

+

+	return (to_write >= can_write) ? -ENOSPC : 0;

+}

+

+/**

+ * get_ep_limit - get the maximum data legnth for this endpoint

+ * @hs_ep: The endpoint

+ *

+ * Return the maximum data that can be queued in one go on a given endpoint

+ * so that transfers that are too long can be split.

+ */

+static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)

+{

+	int index = hs_ep->index;

+	unsigned maxsize;

+	unsigned maxpkt;

+

+	if (index != 0) {

+		maxsize = S3C_DxEPTSIZ_XferSize_LIMIT + 1;

+		maxpkt = S3C_DxEPTSIZ_PktCnt_LIMIT + 1;

+	} else {

+		maxsize = 64+64;

+		if (hs_ep->dir_in)

+			maxpkt = S3C_DIEPTSIZ0_PktCnt_LIMIT + 1;

+		else

+			maxpkt = 2;

+	}

+

+	/* we made the constant loading easier above by using +1 */

+	maxpkt--;

+	maxsize--;

+

+	/* constrain by packet count if maxpkts*pktsize is greater

+	 * than the length register size. */

+

+	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)

+		maxsize = maxpkt * hs_ep->ep.maxpacket;

+

+	return maxsize;

+}

+

+/**

+ * s3c_hsotg_start_req - start a USB request from an endpoint's queue

+ * @hsotg: The controller state.

+ * @hs_ep: The endpoint to process a request for

+ * @hs_req: The request to start.

+ * @continuing: True if we are doing more for the current request.

+ *

+ * Start the given request running by setting the endpoint registers

+ * appropriately, and writing any data to the FIFOs.

+ */

+static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,

+				struct s3c_hsotg_ep *hs_ep,

+				struct s3c_hsotg_req *hs_req,

+				bool continuing)

+{

+	struct usb_request *ureq = &hs_req->req;

+	int index = hs_ep->index;

+	int dir_in = hs_ep->dir_in;

+	u32 epctrl_reg;

+	u32 epsize_reg;

+	u32 epsize;

+	u32 ctrl;

+	unsigned length;

+	unsigned packets;

+	unsigned maxreq;

+

+	if (index != 0) {

+		if (hs_ep->req && !continuing) {

+			dev_err(hsotg->dev, "%s: active request\n", __func__);

+			WARN_ON(1);

+			return;

+		} else if (hs_ep->req != hs_req && continuing) {

+			dev_err(hsotg->dev,

+				"%s: continue different req\n", __func__);

+			WARN_ON(1);

+			return;

+		}

+	}

+

+	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);

+	epsize_reg = dir_in ? S3C_DIEPTSIZ(index) : S3C_DOEPTSIZ(index);

+

+	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",

+		__func__, readl(hsotg->regs + epctrl_reg), index,

+		hs_ep->dir_in ? "in" : "out");

+

+	/* If endpoint is stalled, we will restart request later */

+	ctrl = readl(hsotg->regs + epctrl_reg);

+

+	if (ctrl & S3C_DxEPCTL_Stall) {

+		dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);

+		return;

+	}

+

+	length = ureq->length - ureq->actual;

+

+	if (0)

+		dev_dbg(hsotg->dev,

+			"REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",

+			ureq->buf, length, ureq->dma,

+			ureq->no_interrupt, ureq->zero, ureq->short_not_ok);

+

+	maxreq = get_ep_limit(hs_ep);

+	if (length > maxreq) {

+		int round = maxreq % hs_ep->ep.maxpacket;

+

+		dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",

+			__func__, length, maxreq, round);

+

+		/* round down to multiple of packets */

+		if (round)

+			maxreq -= round;

+

+		length = maxreq;

+	}

+

+	if (length)

+		packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);

+	else

+		packets = 1;	/* send one packet if length is zero. */

+

+	if (dir_in && index != 0)

+		epsize = S3C_DxEPTSIZ_MC(1);

+	else

+		epsize = 0;

+

+	if (index != 0 && ureq->zero) {

+		/* test for the packets being exactly right for the

+		 * transfer */

+
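+		/* if the data ends exactly on a packet boundary, send one extra (zero length) packet so the host sees the end of the transfer */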

+		if (length == (packets * hs_ep->ep.maxpacket))

+			packets++;

+	}

+

+	epsize |= S3C_DxEPTSIZ_PktCnt(packets);

+	epsize |= S3C_DxEPTSIZ_XferSize(length);

+

+	dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",

+		__func__, packets, length, ureq->length, epsize, epsize_reg);

+

+	/* store the request as the current one we're doing */

+	hs_ep->req = hs_req;

+

+	/* write size / packets */

+	writel(epsize, hsotg->regs + epsize_reg);

+

+	if (using_dma(hsotg) && !continuing) {

+		unsigned int dma_reg;

+

+		/* write DMA address to control register, buffer already

+		 * synced by s3c_hsotg_ep_queue().  */

+

+		dma_reg = dir_in ? S3C_DIEPDMA(index) : S3C_DOEPDMA(index);

+		writel(ureq->dma, hsotg->regs + dma_reg);

+

+		dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",

+			__func__, ureq->dma, dma_reg);

+	}

+

+	ctrl |= S3C_DxEPCTL_EPEna;	/* ensure ep enabled */

+	ctrl |= S3C_DxEPCTL_USBActEp;

+	ctrl |= S3C_DxEPCTL_CNAK;	/* clear NAK set by core */

+

+	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);

+	writel(ctrl, hsotg->regs + epctrl_reg);

+

+	/* set these, it seems that DMA support increments past the end

+	 * of the packet buffer so we need to calculate the length from

+	 * this information. */

+	hs_ep->size_loaded = length;

+	hs_ep->last_load = ureq->actual;

+

+	if (dir_in && !using_dma(hsotg)) {

+		/* set these anyway, we may need them for non-periodic in */

+		hs_ep->fifo_load = 0;

+

+		s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);

+	}

+

+	/* clear the INTknTXFEmpMsk when we start a request, more as an aid

+	 * to debugging to see what is going on. */

+	if (dir_in)

+		writel(S3C_DIEPMSK_INTknTXFEmpMsk,

+		       hsotg->regs + S3C_DIEPINT(index));

+

+	/* Note, trying to clear the NAK here causes problems with transmit

+	 * on the S3C6400 ending up with the TXFIFO becoming full. */

+

+	/* check ep is enabled */

+	if (!(readl(hsotg->regs + epctrl_reg) & S3C_DxEPCTL_EPEna))

+		dev_warn(hsotg->dev,

+			 "ep%d: failed to become enabled (DxEPCTL=0x%08x)?\n",

+			 index, readl(hsotg->regs + epctrl_reg));

+

+	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n",

+		__func__, readl(hsotg->regs + epctrl_reg));

+}

+

+/**

+ * s3c_hsotg_map_dma - map the DMA memory being used for the request

+ * @hsotg: The device state.

+ * @hs_ep: The endpoint the request is on.

+ * @req: The request being processed.

+ *

+ * We've been asked to queue a request, so ensure that the memory buffer

+ * is correctly setup for DMA. If we've been passed an extant DMA address

+ * then ensure the buffer has been synced to memory. If our buffer has no

+ * DMA memory, then we map the memory and mark our request to allow us to

+ * cleanup on completion.

+*/

+static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,

+			     struct s3c_hsotg_ep *hs_ep,

+			     struct usb_request *req)

+{

+	enum dma_data_direction dir;

+	struct s3c_hsotg_req *hs_req = our_req(req);

+

+	dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

+

+	/* if the length is zero, ignore the DMA data */

+	if (hs_req->req.length == 0)

+		return 0;

+

+	if (req->dma == DMA_ADDR_INVALID) {

+		dma_addr_t dma;

+

+		dma = dma_map_single(hsotg->dev, req->buf, req->length, dir);

+

+		if (unlikely(dma_mapping_error(hsotg->dev, dma)))

+			goto dma_error;

+
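+		/* reject buffers that are not word (4 byte) aligned */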

+		if (dma & 3) {

+			dev_err(hsotg->dev, "%s: unaligned dma buffer\n",

+				__func__);

+

+			dma_unmap_single(hsotg->dev, dma, req->length, dir);

+			return -EINVAL;

+		}

+

+		hs_req->mapped = 1;

+		req->dma = dma;

+	} else {

+		dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);

+		hs_req->mapped = 0;

+	}

+

+	return 0;

+

+dma_error:

+	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",

+		__func__, req->buf, req->length);

+

+	return -EIO;

+}

+

+static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,

+			      gfp_t gfp_flags)

+{

+	struct s3c_hsotg_req *hs_req = our_req(req);

+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);

+	struct s3c_hsotg *hs = hs_ep->parent;

+	unsigned long irqflags;

+	bool first;

+

+	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",

+		ep->name, req, req->length, req->buf, req->no_interrupt,

+		req->zero, req->short_not_ok);

+

+	/* initialise status of the request */

+	INIT_LIST_HEAD(&hs_req->queue);

+	req->actual = 0;

+	req->status = -EINPROGRESS;

+

+	/* if we're using DMA, sync the buffers as necessary */

+	if (using_dma(hs)) {

+		int ret = s3c_hsotg_map_dma(hs, hs_ep, req);

+		if (ret)

+			return ret;

+	}

+

+	spin_lock_irqsave(&hs_ep->lock, irqflags);

+

+	first = list_empty(&hs_ep->queue);

+	list_add_tail(&hs_req->queue, &hs_ep->queue);

+
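+	/* only kick the hardware if the endpoint was idle; otherwise this request is picked up when the current transfer completes */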

+	if (first)

+		s3c_hsotg_start_req(hs, hs_ep, hs_req, false);

+

+	spin_unlock_irqrestore(&hs_ep->lock, irqflags);

+

+	return 0;

+}

+

+static void s3c_hsotg_ep_free_request(struct usb_ep *ep,

+				      struct usb_request *req)

+{

+	struct s3c_hsotg_req *hs_req = our_req(req);

+

+	kfree(hs_req);

+}

+

+/**

+ * s3c_hsotg_complete_oursetup - setup completion callback

+ * @ep: The endpoint the request was on.

+ * @req: The request completed.

+ *

+ * Called on completion of any requests the driver itself

+ * submitted that need cleaning up.

+ */

+static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,

+					struct usb_request *req)

+{

+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);

+	struct s3c_hsotg *hsotg = hs_ep->parent;

+

+	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);

+

+	s3c_hsotg_ep_free_request(ep, req);

+}

+

+/**

+ * ep_from_windex - convert control wIndex value to endpoint

+ * @hsotg: The driver state.

+ * @windex: The control request wIndex field (in host order).

+ *

+ * Convert the given wIndex into a pointer to a driver endpoint

+ * structure, or return NULL if it is not a valid endpoint.

+*/

+static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg,

+					   u32 windex)

+{

+	struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];

+	int dir = (windex & USB_DIR_IN) ? 1 : 0;

+	int idx = windex & 0x7F;

+
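+	/* wIndex carries the endpoint number in its low bits and the direction in USB_DIR_IN (bit 7) */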

+	if (windex >= 0x100)

+		return NULL;

+

+	if (idx > S3C_HSOTG_EPS)

+		return NULL;

+

+	if (idx && ep->dir_in != dir)

+		return NULL;

+

+	return ep;

+}

+

+/**

+ * s3c_hsotg_send_reply - send reply to control request

+ * @hsotg: The device state

+ * @ep: Endpoint 0

+ * @buff: Buffer for request

+ * @length: Length of reply.

+ *

+ * Create a request and queue it on the given endpoint. This is useful as

+ * an internal method of sending replies to certain control requests, etc.

+ */

+static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg,

+				struct s3c_hsotg_ep *ep,

+				void *buff,

+				int length)

+{

+	struct usb_request *req;

+	int ret;

+

+	dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);

+

+	req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);

+	hsotg->ep0_reply = req;

+	if (!req) {

+		dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);

+		return -ENOMEM;

+	}

+

+	req->buf = hsotg->ep0_buff;

+	req->length = length;

+	req->zero = 1; /* always do zero-length final transfer */

+	req->complete = s3c_hsotg_complete_oursetup;

+

+	if (length)

+		memcpy(req->buf, buff, length);

+	else

+		ep->sent_zlp = 1;

+

+	ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);

+	if (ret) {

+		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);

+		return ret;

+	}

+

+	return 0;

+}

+

+/**

+ * s3c_hsotg_process_req_status - process request GET_STATUS

+ * @hsotg: The device state

+ * @ctrl: USB control request

+ */

+static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg,

+					struct usb_ctrlrequest *ctrl)

+{

+	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];

+	struct s3c_hsotg_ep *ep;

+	__le16 reply;

+	int ret;

+

+	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);

+

+	if (!ep0->dir_in) {

+		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);

+		return -EINVAL;

+	}

+

+	switch (ctrl->bRequestType & USB_RECIP_MASK) {

+	case USB_RECIP_DEVICE:

+		reply = cpu_to_le16(0); /* bit 0 => self powered,

+					 * bit 1 => remote wakeup */

+		break;

+

+	case USB_RECIP_INTERFACE:

+		/* currently, the data result should be zero */

+		reply = cpu_to_le16(0);

+		break;

+

+	case USB_RECIP_ENDPOINT:

+		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));

+		if (!ep)

+			return -ENOENT;

+

+		reply = cpu_to_le16(ep->halted ? 1 : 0);

+		break;

+

+	default:

+		return 0;

+	}

+
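+	/* a GET_STATUS reply is always exactly two bytes */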

+	if (le16_to_cpu(ctrl->wLength) != 2)

+		return -EINVAL;

+

+	ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2);

+	if (ret) {

+		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);

+		return ret;

+	}

+

+	return 1;

+}

+

+static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);

+

+/**

+ * get_ep_head - return the first request on the endpoint

+ * @hs_ep: The controller endpoint to get

+ *

+ * Get the first request on the endpoint.

+ */

+static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)

+{

+	if (list_empty(&hs_ep->queue))

+		return NULL;

+

+	return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue);

+}

+

+/**

+ * s3c_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE

+ * @hsotg: The device state

+ * @ctrl: USB control request

+ */

+static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,

+					 struct usb_ctrlrequest *ctrl)

+{

+	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];

+	struct s3c_hsotg_req *hs_req;

+	bool restart;

+	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);

+	struct s3c_hsotg_ep *ep;

+	int ret;

+

+	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",

+		__func__, set ? "SET" : "CLEAR");

+

+	if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {

+		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));

+		if (!ep) {

+			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",

+				__func__, le16_to_cpu(ctrl->wIndex));

+			return -ENOENT;

+		}

+

+		switch (le16_to_cpu(ctrl->wValue)) {

+		case USB_ENDPOINT_HALT:

+			s3c_hsotg_ep_sethalt(&ep->ep, set);

+

+			ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);

+			if (ret) {

+				dev_err(hsotg->dev,

+					"%s: failed to send reply\n", __func__);

+				return ret;

+			}

+

+			if (!set) {

+				/*

+				 * If we have request in progress,

+				 * then complete it

+				 */

+				if (ep->req) {

+					hs_req = ep->req;

+					ep->req = NULL;

+					list_del_init(&hs_req->queue);

+					hs_req->req.complete(&ep->ep,

+							     &hs_req->req);

+				}

+

+				/* If we have pending request, then start it */

+				restart = !list_empty(&ep->queue);

+				if (restart) {

+					hs_req = get_ep_head(ep);

+					s3c_hsotg_start_req(hsotg, ep,

+							    hs_req, false);

+				}

+			}

+

+			break;

+

+		default:

+			return -ENOENT;

+		}

+	} else

+		return -ENOENT;  /* currently only deal with endpoint */

+

+	return 1;

+}

+

+/**

+ * s3c_hsotg_process_control - process a control request

+ * @hsotg: The device state

+ * @ctrl: The control request received

+ *

+ * The controller has received the SETUP phase of a control request, and

+ * needs to work out what to do next (and whether to pass it on to the

+ * gadget driver).

+ */

+static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,

+				      struct usb_ctrlrequest *ctrl)

+{

+	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];

+	int ret = 0;

+	u32 dcfg;

+

+	ep0->sent_zlp = 0;

+

+	dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",

+		 ctrl->bRequest, ctrl->bRequestType,

+		 ctrl->wValue, ctrl->wLength);

+

+	/* record the direction of the request, for later use when enqueuing

+	 * packets onto EP0. */

+

+	ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0;

+	dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in);

+

+	/* if we've no data with this request, then the last part of the

+	 * transaction is going to implicitly be IN. */

+	if (ctrl->wLength == 0)

+		ep0->dir_in = 1;

+

+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {

+		switch (ctrl->bRequest) {

+		case USB_REQ_SET_ADDRESS:

+			dcfg = readl(hsotg->regs + S3C_DCFG);

+			dcfg &= ~S3C_DCFG_DevAddr_MASK;

+			dcfg |= ctrl->wValue << S3C_DCFG_DevAddr_SHIFT;

+			writel(dcfg, hsotg->regs + S3C_DCFG);

+

+			dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);

+

+			ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);

+			return;

+

+		case USB_REQ_GET_STATUS:

+			ret = s3c_hsotg_process_req_status(hsotg, ctrl);

+			break;

+

+		case USB_REQ_CLEAR_FEATURE:

+		case USB_REQ_SET_FEATURE:

+			ret = s3c_hsotg_process_req_feature(hsotg, ctrl);

+			break;

+		}

+	}

+

+	/* as a fallback, try delivering it to the driver to deal with */

+

+	if (ret == 0 && hsotg->driver) {

+		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);

+		if (ret < 0)

+			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);

+	}

+

+	/* the request either could not be handled, or is not formatted correctly,

+	 * so respond with a STALL for the status stage to indicate failure.

+	 */

+

+	if (ret < 0) {

+		u32 reg;

+		u32 ctrl;

+

+		dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);

+		reg = (ep0->dir_in) ? S3C_DIEPCTL0 : S3C_DOEPCTL0;

+

+		/* S3C_DxEPCTL_Stall will be cleared by EP once it has

+		 * taken effect, so no need to clear later. */

+

+		ctrl = readl(hsotg->regs + reg);

+		ctrl |= S3C_DxEPCTL_Stall;

+		ctrl |= S3C_DxEPCTL_CNAK;

+		writel(ctrl, hsotg->regs + reg);

+

+		dev_dbg(hsotg->dev,

+			"written DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",

+			ctrl, reg, readl(hsotg->regs + reg));

+

+		/* we don't believe we need to do anything more to get the EP

+		 * to reply with a STALL packet */

+	}

+}

+

+static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);

+

+/**

+ * s3c_hsotg_complete_setup - completion of a setup transfer

+ * @ep: The endpoint the request was on.

+ * @req: The request completed.

+ *

+ * Called on completion of any requests the driver itself submitted for

+ * EP0 setup packets

+ */

+static void s3c_hsotg_complete_setup(struct usb_ep *ep,

+				     struct usb_request *req)

+{

+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);

+	struct s3c_hsotg *hsotg = hs_ep->parent;

+

+	if (req->status < 0) {

+		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);

+		return;

+	}

+

+	if (req->actual == 0)

+		s3c_hsotg_enqueue_setup(hsotg);

+	else

+		s3c_hsotg_process_control(hsotg, req->buf);

+}

+

+/**

+ * s3c_hsotg_enqueue_setup - start a request for EP0 packets

+ * @hsotg: The device state.

+ *

+ * Enqueue a request on EP0 if necessary to receive any SETUP packets

+ * received from the host.

+ */

+static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)

+{

+	struct usb_request *req = hsotg->ctrl_req;

+	struct s3c_hsotg_req *hs_req = our_req(req);

+	int ret;

+

+	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);

+

+	req->zero = 0;
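+	/* a SETUP packet is always 8 bytes long */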

+	req->length = 8;

+	req->buf = hsotg->ctrl_buff;

+	req->complete = s3c_hsotg_complete_setup;

+

+	if (!list_empty(&hs_req->queue)) {

+		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);

+		return;

+	}

+

+	hsotg->eps[0].dir_in = 0;

+

+	ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);

+	if (ret < 0) {

+		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);

+		/* Don't think there's much we can do other than watch the

+		 * driver fail. */

+	}

+}

+

+/**

+ * s3c_hsotg_complete_request - complete a request given to us

+ * @hsotg: The device state.

+ * @hs_ep: The endpoint the request was on.

+ * @hs_req: The request to complete.

+ * @result: The result code (0 => Ok, otherwise errno)

+ *

+ * The given request has finished, so call the necessary completion

+ * if it has one and then look to see if we can start a new request

+ * on the endpoint.

+ *

+ * Note, expects the ep to already be locked as appropriate.

+*/

+static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,

+				       struct s3c_hsotg_ep *hs_ep,

+				       struct s3c_hsotg_req *hs_req,

+				       int result)

+{

+	bool restart;

+

+	if (!hs_req) {

+		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);

+		return;

+	}

+

+	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",

+		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);

+

+	/* only replace the status if we've not already set an error

+	 * from a previous transaction */

+

+	if (hs_req->req.status == -EINPROGRESS)

+		hs_req->req.status = result;

+

+	hs_ep->req = NULL;

+	list_del_init(&hs_req->queue);

+

+	if (using_dma(hsotg))

+		s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);

+

+	/* call the complete request with the locks off, just in case the

+	 * request tries to queue more work for this endpoint. */

+

+	if (hs_req->req.complete) {

+		spin_unlock(&hs_ep->lock);

+		hs_req->req.complete(&hs_ep->ep, &hs_req->req);

+		spin_lock(&hs_ep->lock);

+	}

+

+	/* Look to see if there is anything else to do. Note, the completion

+	 * of the previous request may have caused a new request to be started

+	 * so be careful when doing this. */

+

+	if (!hs_ep->req && result >= 0) {

+		restart = !list_empty(&hs_ep->queue);

+		if (restart) {

+			hs_req = get_ep_head(hs_ep);

+			s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);

+		}

+	}

+}

+

+/**

+ * s3c_hsotg_complete_request_lock - complete a request given to us (locked)

+ * @hsotg: The device state.

+ * @hs_ep: The endpoint the request was on.

+ * @hs_req: The request to complete.

+ * @result: The result code (0 => Ok, otherwise errno)

+ *

+ * See s3c_hsotg_complete_request(), but called with the endpoint's

+ * lock held.

+*/

+static void s3c_hsotg_complete_request_lock(struct s3c_hsotg *hsotg,

+					    struct s3c_hsotg_ep *hs_ep,

+					    struct s3c_hsotg_req *hs_req,

+					    int result)

+{

+	unsigned long flags;

+

+	spin_lock_irqsave(&hs_ep->lock, flags);

+	s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);

+	spin_unlock_irqrestore(&hs_ep->lock, flags);

+}

+

+/**

+ * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint

+ * @hsotg: The device state.

+ * @ep_idx: The endpoint index for the data

+ * @size: The size of data in the fifo, in bytes

+ *

+ * The FIFO status shows there is data to read from the FIFO for a given

+ * endpoint, so sort out whether we need to read the data into a request

+ * that has been made for that endpoint.

+ */

+static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)

+{

+	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];

+	struct s3c_hsotg_req *hs_req = hs_ep->req;

+	void __iomem *fifo = hsotg->regs + S3C_EPFIFO(ep_idx);

+	int to_read;

+	int max_req;

+	int read_ptr;

+

+	if (!hs_req) {

+		u32 epctl = readl(hsotg->regs + S3C_DOEPCTL(ep_idx));

+		int ptr;

+

+		dev_warn(hsotg->dev,

+			 "%s: FIFO %d bytes on ep%d but no req (DxEPCTL=0x%08x)\n",

+			 __func__, size, ep_idx, epctl);

+

+		/* dump the data from the FIFO, we've nothing we can do */

+		for (ptr = 0; ptr < size; ptr += 4)

+			(void)readl(fifo);

+

+		return;

+	}

+

+	spin_lock(&hs_ep->lock);

+

+	to_read = size;

+	read_ptr = hs_req->req.actual;

+	max_req = hs_req->req.length - read_ptr;

+

+	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",

+		__func__, to_read, max_req, read_ptr, hs_req->req.length);

+

+	if (to_read > max_req) {

+		/* more data appeared than we were willing

+		 * to deal with in this request.

+		 */

+

+		/* currently we don't deal with this */

+		WARN_ON_ONCE(1);

+	}

+

+	hs_ep->total_data += to_read;

+	hs_req->req.actual += to_read;

+	to_read = DIV_ROUND_UP(to_read, 4);

+

+	/* note, we might over-write the buffer end by 3 bytes depending on

+	 * alignment of the data. */

+	readsl(fifo, hs_req->req.buf + read_ptr, to_read);

+

+	spin_unlock(&hs_ep->lock);

+}

+

+/**

+ * s3c_hsotg_send_zlp - send zero-length packet on control endpoint

+ * @hsotg: The device instance

+ * @req: The request currently on this endpoint

+ *

+ * Generate a zero-length IN packet request for terminating a SETUP

+ * transaction.

+ *

+ * Note, since we don't write any data to the TxFIFO, it is

+ * currently believed that we do not need to wait for any space in

+ * the TxFIFO.

+ */

+static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,

+			       struct s3c_hsotg_req *req)

+{

+	u32 ctrl;

+

+	if (!req) {

+		dev_warn(hsotg->dev, "%s: no request?\n", __func__);

+		return;

+	}

+
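+	/* if the request itself carried no data, just note the ZLP and re-arm EP0 for the next SETUP */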

+	if (req->req.length == 0) {

+		hsotg->eps[0].sent_zlp = 1;

+		s3c_hsotg_enqueue_setup(hsotg);

+		return;

+	}

+

+	hsotg->eps[0].dir_in = 1;

+	hsotg->eps[0].sent_zlp = 1;

+

+	dev_dbg(hsotg->dev, "sending zero-length packet\n");

+

+	/* issue a zero-sized packet to terminate this */

+	writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |

+	       S3C_DxEPTSIZ_XferSize(0), hsotg->regs + S3C_DIEPTSIZ(0));

+

+	ctrl = readl(hsotg->regs + S3C_DIEPCTL0);

+	ctrl |= S3C_DxEPCTL_CNAK;  /* clear NAK set by core */

+	ctrl |= S3C_DxEPCTL_EPEna; /* ensure ep enabled */

+	ctrl |= S3C_DxEPCTL_USBActEp;

+	writel(ctrl, hsotg->regs + S3C_DIEPCTL0);

+}

+

+/**

+ * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO

+ * @hsotg: The device instance

+ * @epnum: The endpoint received from

+ * @was_setup: Set if processing a SetupDone event.

+ *

+ * The RXFIFO has delivered an OutDone event, which means that the data

+ * transfer for an OUT endpoint has been completed, either by a short

+ * packet or by the finish of a transfer.

+*/

+static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,

+				     int epnum, bool was_setup)

+{

+	u32 epsize = readl(hsotg->regs + S3C_DOEPTSIZ(epnum));

+	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];

+	struct s3c_hsotg_req *hs_req = hs_ep->req;

+	struct usb_request *req = &hs_req->req;

+	unsigned size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);

+	int result = 0;

+

+	if (!hs_req) {

+		dev_dbg(hsotg->dev, "%s: no request active\n", __func__);

+		return;

+	}

+

+	if (using_dma(hsotg)) {

+		unsigned size_done;

+

+		/* Calculate the size of the transfer by checking how much

+		 * is left in the endpoint size register and then working it

+		 * out from the amount we loaded for the transfer.

+		 *

+		 * We need to do this as DMA pointers are always 32bit aligned

+		 * so may overshoot/undershoot the transfer.

+		 */

+

+		size_done = hs_ep->size_loaded - size_left;

+		size_done += hs_ep->last_load;

+

+		req->actual = size_done;

+	}

+

+	/* if there is more request to do, schedule new transfer */

+	if (req->actual < req->length && size_left == 0) {

+		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);

+		return;

+	}

+

+	if (req->actual < req->length && req->short_not_ok) {

+		dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",

+			__func__, req->actual, req->length);

+

+		/* todo - what should we return here? there's no one else

+		 * even bothering to check the status. */

+	}

+

+	if (epnum == 0) {

+		if (!was_setup && req->complete != s3c_hsotg_complete_setup)

+			s3c_hsotg_send_zlp(hsotg, hs_req);

+	}

+

+	s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, result);

+}

+

+/**

+ * s3c_hsotg_read_frameno - read current frame number

+ * @hsotg: The device instance

+ *

+ * Return the current frame number

+*/

+static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)

+{

+	u32 dsts;

+

+	dsts = readl(hsotg->regs + S3C_DSTS);

+	dsts &= S3C_DSTS_SOFFN_MASK;

+	dsts >>= S3C_DSTS_SOFFN_SHIFT;

+

+	return dsts;

+}

+

+/**

+ * s3c_hsotg_handle_rx - RX FIFO has data

+ * @hsotg: The device instance

+ *

+ * The IRQ handler has detected that the RX FIFO has some data in it

+ * that requires processing, so find out what is in there and do the

+ * appropriate read.

+ *

+ * The RXFIFO is a true FIFO, the packets coming out are still in packet

+ * chunks, so if you have x packets received on an endpoint you'll get x

+ * FIFO events delivered, each with a packet's worth of data in it.

+ *

+ * When using DMA, we should not be processing events from the RXFIFO

+ * as the actual data should be sent to the memory directly and we turn

+ * on the completion interrupts to get notifications of transfer completion.

+ */

+static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)

+{

+	u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP);

+	u32 epnum, status, size;

+

+	WARN_ON(using_dma(hsotg));

+

+	epnum = grxstsr & S3C_GRXSTS_EPNum_MASK;

+	status = grxstsr & S3C_GRXSTS_PktSts_MASK;

+

+	size = grxstsr & S3C_GRXSTS_ByteCnt_MASK;

+	size >>= S3C_GRXSTS_ByteCnt_SHIFT;

+

+	if (1)

+		dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",

+			__func__, grxstsr, size, epnum);

+
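+/* shift the packet-status constants down so they can be used directly as case labels below */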

+#define __status(x) ((x) >> S3C_GRXSTS_PktSts_SHIFT)

+

+	switch (status >> S3C_GRXSTS_PktSts_SHIFT) {

+	case __status(S3C_GRXSTS_PktSts_GlobalOutNAK):

+		dev_dbg(hsotg->dev, "GlobalOutNAK\n");

+		break;

+

+	case __status(S3C_GRXSTS_PktSts_OutDone):

+		dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",

+			s3c_hsotg_read_frameno(hsotg));

+

+		if (!using_dma(hsotg))

+			s3c_hsotg_handle_outdone(hsotg, epnum, false);

+		break;

+

+	case __status(S3C_GRXSTS_PktSts_SetupDone):

+		dev_dbg(hsotg->dev,

+			"SetupDone (Frame=0x%08x, DOEPCTL=0x%08x)\n",

+			s3c_hsotg_read_frameno(hsotg),

+			readl(hsotg->regs + S3C_DOEPCTL(0)));

+

+		s3c_hsotg_handle_outdone(hsotg, epnum, true);

+		break;

+

+	case __status(S3C_GRXSTS_PktSts_OutRX):

+		s3c_hsotg_rx_data(hsotg, epnum, size);

+		break;

+

+	case __status(S3C_GRXSTS_PktSts_SetupRX):

+		dev_dbg(hsotg->dev,

+			"SetupRX (Frame=0x%08x, DOEPCTL=0x%08x)\n",

+			s3c_hsotg_read_frameno(hsotg),

+			readl(hsotg->regs + S3C_DOEPCTL(0)));

+

+		s3c_hsotg_rx_data(hsotg, epnum, size);

+		break;

+

+	default:

+		dev_warn(hsotg->dev, "%s: unknown status %08x\n",

+			 __func__, grxstsr);

+

+		s3c_hsotg_dump(hsotg);

+		break;

+	}

+}

+

+/**

+ * s3c_hsotg_ep0_mps - turn max packet size into register setting

+ * @mps: The maximum packet size in bytes.

+*/

+static u32 s3c_hsotg_ep0_mps(unsigned int mps)

+{

+	switch (mps) {

+	case 64:

+		return S3C_D0EPCTL_MPS_64;

+	case 32:

+		return S3C_D0EPCTL_MPS_32;

+	case 16:

+		return S3C_D0EPCTL_MPS_16;

+	case 8:

+		return S3C_D0EPCTL_MPS_8;

+	}

+

+	/* bad max packet size, warn and return invalid result */

+	WARN_ON(1);

+	return (u32)-1;

+}

+

+/**

+ * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field

+ * @hsotg: The driver state.

+ * @ep: The index number of the endpoint

+ * @mps: The maximum packet size in bytes

+ *

+ * Configure the maximum packet size for the given endpoint, updating

+ * the hardware control registers to reflect this.

+ */

+static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,

+				       unsigned int ep, unsigned int mps)

+{

+	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];

+	void __iomem *regs = hsotg->regs;

+	u32 mpsval;

+	u32 reg;

+

+	if (ep == 0) {

+		/* EP0 is a special case */

+		mpsval = s3c_hsotg_ep0_mps(mps);

+		if (mpsval > 3)

+			goto bad_mps;

+	} else {

+		if (mps >= S3C_DxEPCTL_MPS_LIMIT+1)

+			goto bad_mps;

+

+		mpsval = mps;

+	}

+

+	hs_ep->ep.maxpacket = mps;

+

+	/* update both the in and out endpoint control registers, even

+	 * if one of the directions may not be in use. */

+

+	reg = readl(regs + S3C_DIEPCTL(ep));

+	reg &= ~S3C_DxEPCTL_MPS_MASK;

+	reg |= mpsval;

+	writel(reg, regs + S3C_DIEPCTL(ep));

+

+	if (ep) {

+		reg = readl(regs + S3C_DOEPCTL(ep));

+		reg &= ~S3C_DxEPCTL_MPS_MASK;

+		reg |= mpsval;

+		writel(reg, regs + S3C_DOEPCTL(ep));

+	}

+

+	return;

+

+bad_mps:

+	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);

+}

+

+/**

+ * s3c_hsotg_txfifo_flush - flush Tx FIFO

+ * @hsotg: The driver state

+ * @idx: The index for the endpoint (0..15)

+ */

+static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx)

+{

+	int timeout;

+	int val;

+

+	writel(S3C_GRSTCTL_TxFNum(idx) | S3C_GRSTCTL_TxFFlsh,

+		hsotg->regs + S3C_GRSTCTL);

+

+	/* wait until the fifo is flushed */

+	timeout = 100;

+

+	while (1) {

+		val = readl(hsotg->regs + S3C_GRSTCTL);

+

+		if ((val & (S3C_GRSTCTL_TxFFlsh)) == 0)

+			break;

+

+		if (--timeout == 0) {

+			dev_err(hsotg->dev,

+				"%s: timeout flushing fifo (GRSTCTL=%08x)\n",

+				__func__, val);

+			break;
+		}

+

+		udelay(1);

+	}

+}

+

+/**

+ * s3c_hsotg_trytx - check to see if anything needs transmitting

+ * @hsotg: The driver state

+ * @hs_ep: The driver endpoint to check.

+ *

+ * Check to see if there is a request that has data to send, and if so

+ * make an attempt to write data into the FIFO.

+ */

+static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,

+			   struct s3c_hsotg_ep *hs_ep)

+{

+	struct s3c_hsotg_req *hs_req = hs_ep->req;

+

+	if (!hs_ep->dir_in || !hs_req)

+		return 0;

+

+	if (hs_req->req.actual < hs_req->req.length) {

+		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",

+			hs_ep->index);

+		return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);

+	}

+

+	return 0;

+}

+

+/**

+ * s3c_hsotg_complete_in - complete IN transfer

+ * @hsotg: The device state.

+ * @hs_ep: The endpoint that has just completed.

+ *

+ * An IN transfer has been completed, update the transfer's state and then

+ * call the relevant completion routines.

+ */

+static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,

+				  struct s3c_hsotg_ep *hs_ep)

+{

+	struct s3c_hsotg_req *hs_req = hs_ep->req;

+	u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));

+	int size_left, size_done;

+

+	if (!hs_req) {

+		dev_dbg(hsotg->dev, "XferCompl but no req\n");

+		return;

+	}

+

+	/* Calculate the size of the transfer by checking how much is left

+	 * in the endpoint size register and then working it out from

+	 * the amount we loaded for the transfer.

+	 *

+	 * We do this even for DMA, as the transfer may have incremented

+	 * past the end of the buffer (DMA transfers are always 32bit

+	 * aligned).

+	 */

+

+	size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);

+

+	size_done = hs_ep->size_loaded - size_left;

+	size_done += hs_ep->last_load;

+

+	if (hs_req->req.actual != size_done)

+		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",

+			__func__, hs_req->req.actual, size_done);

+

+	hs_req->req.actual = size_done;

+

+	/* if we did all of the transfer, and there is more data left

+	 * around, then try restarting the rest of the request */

+

+	if (!size_left && hs_req->req.actual < hs_req->req.length) {

+		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);

+		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);

+	} else

+		s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, 0);

+}

+

+/**

+ * s3c_hsotg_epint - handle an in/out endpoint interrupt

+ * @hsotg: The driver state

+ * @idx: The index for the endpoint (0..15)

+ * @dir_in: Set if this is an IN endpoint

+ *

+ * Process and clear any interrupt pending for an individual endpoint

+*/

+static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,

+			    int dir_in)

+{

+	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];

+	u32 epint_reg = dir_in ? S3C_DIEPINT(idx) : S3C_DOEPINT(idx);

+	u32 epctl_reg = dir_in ? S3C_DIEPCTL(idx) : S3C_DOEPCTL(idx);

+	u32 epsiz_reg = dir_in ? S3C_DIEPTSIZ(idx) : S3C_DOEPTSIZ(idx);

+	u32 ints;

+

+	ints = readl(hsotg->regs + epint_reg);

+

+	/* Clear endpoint interrupts */

+	writel(ints, hsotg->regs + epint_reg);

+

+	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",

+		__func__, idx, dir_in ? "in" : "out", ints);

+

+	if (ints & S3C_DxEPINT_XferCompl) {

+		dev_dbg(hsotg->dev,

+			"%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",

+			__func__, readl(hsotg->regs + epctl_reg),

+			readl(hsotg->regs + epsiz_reg));

+

+		/* we get OutDone from the FIFO, so we only need to look

+		 * at completing IN requests here */

+		if (dir_in) {

+			s3c_hsotg_complete_in(hsotg, hs_ep);

+

+			if (idx == 0 && !hs_ep->req)

+				s3c_hsotg_enqueue_setup(hsotg);

+		} else if (using_dma(hsotg)) {

+			/* We're using DMA, we need to fire an OutDone here

+			 * as we ignore the RXFIFO. */

+

+			s3c_hsotg_handle_outdone(hsotg, idx, false);

+		}

+	}

+

+	if (ints & S3C_DxEPINT_EPDisbld) {

+		dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);

+

+		if (dir_in) {

+			int epctl = readl(hsotg->regs + epctl_reg);

+

+			s3c_hsotg_txfifo_flush(hsotg, idx);

+

+			if ((epctl & S3C_DxEPCTL_Stall) &&

+				(epctl & S3C_DxEPCTL_EPType_Bulk)) {

+				int dctl = readl(hsotg->regs + S3C_DCTL);

+

+				dctl |= S3C_DCTL_CGNPInNAK;

+				writel(dctl, hsotg->regs + S3C_DCTL);

+			}

+		}

+	}

+

+	if (ints & S3C_DxEPINT_AHBErr)

+		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);

+

+	if (ints & S3C_DxEPINT_Setup) {  /* Setup or Timeout */

+		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n",  __func__);

+

+		if (using_dma(hsotg) && idx == 0) {

+			/* this is the notification that we've received a

+			 * setup packet. In non-DMA mode we'd get this

+			 * from the RXFIFO, instead we need to process

+			 * the setup here. */

+

+			if (dir_in)

+				WARN_ON_ONCE(1);

+			else

+				s3c_hsotg_handle_outdone(hsotg, 0, true);

+		}

+	}

+

+	if (ints & S3C_DxEPINT_Back2BackSetup)

+		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);

+

+	if (dir_in) {

+		/* not sure if this is important, but we'll clear it anyway

+		 */

+		if (ints & S3C_DIEPMSK_INTknTXFEmpMsk) {

+			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",

+				__func__, idx);

+		}

+

+		/* this probably means something bad is happening */

+		if (ints & S3C_DIEPMSK_INTknEPMisMsk) {

+			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",

+				 __func__, idx);

+		}

+

+		/* FIFO has space or is empty (see GAHBCFG) */

+		if (hsotg->dedicated_fifos &&

+		    ints & S3C_DIEPMSK_TxFIFOEmpty) {

+			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",

+				__func__, idx);

+			if (!using_dma(hsotg))

+				s3c_hsotg_trytx(hsotg, hs_ep);

+		}

+	}

+}

+

+/**

+ * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)

+ * @hsotg: The device state.

+ *

+ * Handle updating the device settings after the enumeration phase has

+ * been completed.

+*/

+static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)

+{

+	u32 dsts = readl(hsotg->regs + S3C_DSTS);

+	int ep0_mps = 0, ep_mps;

+

+	/* This should signal the finish of the enumeration phase

+	 * of the USB handshaking, so we should now know what rate

+	 * we connected at. */

+

+	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);

+

+	/* note, since we're limited by the size of transfer on EP0, and

+	 * it seems IN transfers must be an even number of packets, we do

+	 * not advertise a 64byte MPS on EP0. */

+

+	/* catch both EnumSpd_FS and EnumSpd_FS48 */

+	switch (dsts & S3C_DSTS_EnumSpd_MASK) {

+	case S3C_DSTS_EnumSpd_FS:

+	case S3C_DSTS_EnumSpd_FS48:

+		hsotg->gadget.speed = USB_SPEED_FULL;

+		ep0_mps = EP0_MPS_LIMIT;

+		ep_mps = 64;

+		break;

+

+	case S3C_DSTS_EnumSpd_HS:

+		hsotg->gadget.speed = USB_SPEED_HIGH;

+		ep0_mps = EP0_MPS_LIMIT;

+		ep_mps = 512;

+		break;

+

+	case S3C_DSTS_EnumSpd_LS:

+		hsotg->gadget.speed = USB_SPEED_LOW;

+		/* note, we don't actually support LS in this driver at the

+		 * moment, and the documentation seems to imply that it isn't

+		 * supported by the PHYs on some of the devices.

+		 */

+		break;

+	}

+	dev_info(hsotg->dev, "new device is %s\n",

+		 usb_speed_string(hsotg->gadget.speed));

+

+	/* we should now know the maximum packet size for an

+	 * endpoint, so set the endpoints to a default value. */

+

+	if (ep0_mps) {

+		int i;

+		s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);

+		for (i = 1; i < S3C_HSOTG_EPS; i++)

+			s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);

+	}

+

+	/* ensure after enumeration our EP0 is active */

+

+	s3c_hsotg_enqueue_setup(hsotg);

+

+	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",

+		readl(hsotg->regs + S3C_DIEPCTL0),

+		readl(hsotg->regs + S3C_DOEPCTL0));

+}

+

+/**

+ * kill_all_requests - remove all requests from the endpoint's queue

+ * @hsotg: The device state.

+ * @ep: The endpoint the requests may be on.

+ * @result: The result code to use.

+ * @force: Force removal of any current requests

+ *

+ * Go through the requests on the given endpoint and mark them

+ * completed with the given result code.

+ */

+static void kill_all_requests(struct s3c_hsotg *hsotg,

+			      struct s3c_hsotg_ep *ep,

+			      int result, bool force)

+{

+	struct s3c_hsotg_req *req, *treq;

+	unsigned long flags;

+

+	spin_lock_irqsave(&ep->lock, flags);

+

+	list_for_each_entry_safe(req, treq, &ep->queue, queue) {

+		/* currently, we can't do much about an already

+		 * running request on an in endpoint */

+

+		if (ep->req == req && ep->dir_in && !force)

+			continue;

+

+		s3c_hsotg_complete_request(hsotg, ep, req,

+					   result);

+	}

+

+	spin_unlock_irqrestore(&ep->lock, flags);

+}

+
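+/* invoke a gadget driver callback, but only if a driver is bound and the link speed is known */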

+#define call_gadget(_hs, _entry) \

+	if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN &&	\

+	    (_hs)->driver && (_hs)->driver->_entry)	\

+		(_hs)->driver->_entry(&(_hs)->gadget);

+

+/**

+ * s3c_hsotg_disconnect_irq - disconnect irq service

+ * @hsotg: The device state.

+ *

+ * A disconnect IRQ has been received, meaning that the host has

+ * lost contact with the bus. Remove all current transactions

+ * and signal the gadget driver that this has happened.

+*/

+static void s3c_hsotg_disconnect_irq(struct s3c_hsotg *hsotg)

+{

+	unsigned ep;

+

+	for (ep = 0; ep < S3C_HSOTG_EPS; ep++)

+		kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);

+

+	call_gadget(hsotg, disconnect);

+}

+

+/**

+ * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler

+ * @hsotg: The device state.

+ * @periodic: True if this is a periodic FIFO interrupt

+ */

+static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)

+{

+	struct s3c_hsotg_ep *ep;

+	int epno, ret;

+

+	/* look through for any more data to transmit */

+

+	for (epno = 0; epno < S3C_HSOTG_EPS; epno++) {

+		ep = &hsotg->eps[epno];

+

+		if (!ep->dir_in)

+			continue;

+

+		if ((periodic && !ep->periodic) ||

+		    (!periodic && ep->periodic))

+			continue;

+

+		ret = s3c_hsotg_trytx(hsotg, ep);

+		if (ret < 0)

+			break;

+	}

+}

+

+static struct s3c_hsotg *our_hsotg;

+

+/* IRQ flags which will trigger a retry around the IRQ loop */

+#define IRQ_RETRY_MASK (S3C_GINTSTS_NPTxFEmp | \

+			S3C_GINTSTS_PTxFEmp |  \

+			S3C_GINTSTS_RxFLvl)

+

+/**

+ * s3c_hsotg_irq - handle device interrupt

+ * @irq: The IRQ number triggered

+ * @pw: The pw value when registered the handler.

+ */

+static irqreturn_t s3c_hsotg_irq(int irq, void *pw)

+{

+	struct s3c_hsotg *hsotg = pw;

+	int retry_count = 8;

+	u32 gintsts;

+	u32 gintmsk;

+

+irq_retry:

+	gintsts = readl(hsotg->regs + S3C_GINTSTS);

+	gintmsk = readl(hsotg->regs + S3C_GINTMSK);

+

+	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",

+		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);

+

+	gintsts &= gintmsk;

+

+	if (gintsts & S3C_GINTSTS_OTGInt) {

+		u32 otgint = readl(hsotg->regs + S3C_GOTGINT);

+

+		dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);

+

+		writel(otgint, hsotg->regs + S3C_GOTGINT);

+	}

+

+	if (gintsts & S3C_GINTSTS_DisconnInt) {

+		dev_dbg(hsotg->dev, "%s: DisconnInt\n", __func__);

+		writel(S3C_GINTSTS_DisconnInt, hsotg->regs + S3C_GINTSTS);

+

+		s3c_hsotg_disconnect_irq(hsotg);

+	}

+

+	if (gintsts & S3C_GINTSTS_SessReqInt) {

+		dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);

+		writel(S3C_GINTSTS_SessReqInt, hsotg->regs + S3C_GINTSTS);

+	}

+

+	if (gintsts & S3C_GINTSTS_EnumDone) {

+		writel(S3C_GINTSTS_EnumDone, hsotg->regs + S3C_GINTSTS);

+

+		s3c_hsotg_irq_enumdone(hsotg);

+	}

+

+	if (gintsts & S3C_GINTSTS_ConIDStsChng) {

+		dev_dbg(hsotg->dev, "ConIDStsChng (DSTS=0x%08x, GOTGCTL=%08x)\n",

+			readl(hsotg->regs + S3C_DSTS),

+			readl(hsotg->regs + S3C_GOTGCTL));

+

+		writel(S3C_GINTSTS_ConIDStsChng, hsotg->regs + S3C_GINTSTS);

+	}

+
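+	/* DAINT packs one bit per IN endpoint in its low half and one bit per OUT endpoint in its high half */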

+	if (gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt)) {

+		u32 daint = readl(hsotg->regs + S3C_DAINT);

+		u32 daint_out = daint >> S3C_DAINT_OutEP_SHIFT;

+		u32 daint_in = daint & ~(daint_out << S3C_DAINT_OutEP_SHIFT);

+		int ep;

+

+		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);

+

+		for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {

+			if (daint_out & 1)

+				s3c_hsotg_epint(hsotg, ep, 0);

+		}

+

+		for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {

+			if (daint_in & 1)

+				s3c_hsotg_epint(hsotg, ep, 1);

+		}

+	}

+

+	if (gintsts & S3C_GINTSTS_USBRst) {

+		dev_info(hsotg->dev, "%s: USBRst\n", __func__);

+		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",

+			readl(hsotg->regs + S3C_GNPTXSTS));

+

+		writel(S3C_GINTSTS_USBRst, hsotg->regs + S3C_GINTSTS);

+

+		kill_all_requests(hsotg, &hsotg->eps[0], -ECONNRESET, true);

+

+		/* it seems after a reset we can end up with a situation

+		 * where the TXFIFO still has data in it... the docs

+		 * suggest resetting all the fifos, so use the init_fifo

+		 * code to relayout and flush the fifos.

+		 */

+

+		s3c_hsotg_init_fifo(hsotg);

+

+		s3c_hsotg_enqueue_setup(hsotg);

+	}

+

+	/* check both FIFOs */

+

+	if (gintsts & S3C_GINTSTS_NPTxFEmp) {

+		dev_dbg(hsotg->dev, "NPTxFEmp\n");

+

+		/* Disable the interrupt to stop it happening again

+		 * unless one of these endpoint routines decides that

+		 * it needs re-enabling */

+

+		s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);

+		s3c_hsotg_irq_fifoempty(hsotg, false);

+	}

+

+	if (gintsts & S3C_GINTSTS_PTxFEmp) {

+		dev_dbg(hsotg->dev, "PTxFEmp\n");

+

+		/* See note in S3C_GINTSTS_NPTxFEmp */

+

+		s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_PTxFEmp);

+		s3c_hsotg_irq_fifoempty(hsotg, true);

+	}

+

+	if (gintsts & S3C_GINTSTS_RxFLvl) {

+		/* note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,

+		 * we need to retry s3c_hsotg_handle_rx if this is still

+		 * set. */

+

+		s3c_hsotg_handle_rx(hsotg);

+	}

+

+	if (gintsts & S3C_GINTSTS_ModeMis) {

+		dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");

+		writel(S3C_GINTSTS_ModeMis, hsotg->regs + S3C_GINTSTS);

+	}

+

+	if (gintsts & S3C_GINTSTS_USBSusp) {

+		dev_info(hsotg->dev, "S3C_GINTSTS_USBSusp\n");

+		writel(S3C_GINTSTS_USBSusp, hsotg->regs + S3C_GINTSTS);

+

+		call_gadget(hsotg, suspend);

+	}

+

+	if (gintsts & S3C_GINTSTS_WkUpInt) {

+		dev_info(hsotg->dev, "S3C_GINTSTS_WkUpInt\n");

+		writel(S3C_GINTSTS_WkUpInt, hsotg->regs + S3C_GINTSTS);

+

+		call_gadget(hsotg, resume);

+	}

+

+	if (gintsts & S3C_GINTSTS_ErlySusp) {

+		dev_dbg(hsotg->dev, "S3C_GINTSTS_ErlySusp\n");

+		writel(S3C_GINTSTS_ErlySusp, hsotg->regs + S3C_GINTSTS);

+	}

+

+	/* these next two seem to crop up occasionally causing the core

+	 * to shut down the USB transfer, so try clearing them and logging

+	 * the occurrence. */

+

+	if (gintsts & S3C_GINTSTS_GOUTNakEff) {

+		dev_info(hsotg->dev, "GOUTNakEff triggered\n");

+

+		writel(S3C_DCTL_CGOUTNak, hsotg->regs + S3C_DCTL);

+

+		s3c_hsotg_dump(hsotg);

+	}

+

+	if (gintsts & S3C_GINTSTS_GINNakEff) {

+		dev_info(hsotg->dev, "GINNakEff triggered\n");

+

+		writel(S3C_DCTL_CGNPInNAK, hsotg->regs + S3C_DCTL);

+

+		s3c_hsotg_dump(hsotg);

+	}

+

+	/* if we've had fifo events, we should try and go around the

+	 * loop again to see if there's any point in returning yet. */

+

+	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)

+		goto irq_retry;

+

+	return IRQ_HANDLED;

+}

+

+/**

+ * s3c_hsotg_ep_enable - enable the given endpoint

+ * @ep: The USB endpoint to configure

+ * @desc: The USB endpoint descriptor to configure with.

+ *

+ * This is called from the USB gadget code's usb_ep_enable().

+*/

+static int s3c_hsotg_ep_enable(struct usb_ep *ep,

+			       const struct usb_endpoint_descriptor *desc)

+{

+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);

+	struct s3c_hsotg *hsotg = hs_ep->parent;

+	unsigned long flags;

+	int index = hs_ep->index;

+	u32 epctrl_reg;

+	u32 epctrl;

+	u32 mps;

+	int dir_in;

+	int ret = 0;

+

+	dev_dbg(hsotg->dev,

+		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",

+		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,

+		desc->wMaxPacketSize, desc->bInterval);

+

+	/* not to be called for EP0 */

+	WARN_ON(index == 0);

+

+	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;

+	if (dir_in != hs_ep->dir_in) {

+		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);

+		return -EINVAL;

+	}

+

+	mps = usb_endpoint_maxp(desc);

+

+	/* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */

+

+	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);

+	epctrl = readl(hsotg->regs + epctrl_reg);

+

+	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",

+		__func__, epctrl, epctrl_reg);

+

+	spin_lock_irqsave(&hs_ep->lock, flags);

+

+	epctrl &= ~(S3C_DxEPCTL_EPType_MASK | S3C_DxEPCTL_MPS_MASK);

+	epctrl |= S3C_DxEPCTL_MPS(mps);

+

+	/* mark the endpoint as active, otherwise the core may ignore

+	 * transactions entirely for this endpoint */

+	epctrl |= S3C_DxEPCTL_USBActEp;

+

+	/* set the NAK status on the endpoint, otherwise we might try and

+	 * do something with data that we've not yet got a request to process

+	 * since the RXFIFO will take data for an endpoint even if the

+	 * size register hasn't been set.

+	 */

+

+	epctrl |= S3C_DxEPCTL_SNAK;

+

+	/* update the endpoint state */

+	hs_ep->ep.maxpacket = mps;

+

+	/* default, set to non-periodic */

+	hs_ep->periodic = 0;

+

+	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {

+	case USB_ENDPOINT_XFER_ISOC:

+		dev_err(hsotg->dev, "no current ISOC support\n");

+		ret = -EINVAL;

+		goto out;

+

+	case USB_ENDPOINT_XFER_BULK:

+		epctrl |= S3C_DxEPCTL_EPType_Bulk;

+		break;

+

+	case USB_ENDPOINT_XFER_INT:

+		if (dir_in) {

+			/* Allocate our TxFNum by simply using the index

+			 * of the endpoint for the moment. We could do

+			 * something better if the host indicates how

+			 * many FIFOs we are expecting to use. */

+

+			hs_ep->periodic = 1;

+			epctrl |= S3C_DxEPCTL_TxFNum(index);

+		}

+

+		epctrl |= S3C_DxEPCTL_EPType_Intterupt;

+		break;

+

+	case USB_ENDPOINT_XFER_CONTROL:

+		epctrl |= S3C_DxEPCTL_EPType_Control;

+		break;

+	}

+

+	/* if the hardware has dedicated fifos, we must give each IN EP

+	 * a unique tx-fifo even if it is non-periodic.

+	 */

+	if (dir_in && hsotg->dedicated_fifos)

+		epctrl |= S3C_DxEPCTL_TxFNum(index);

+

+	/* for non control endpoints, set PID to D0 */

+	if (index)

+		epctrl |= S3C_DxEPCTL_SetD0PID;

+

+	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",

+		__func__, epctrl);

+

+	writel(epctrl, hsotg->regs + epctrl_reg);

+	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",

+		__func__, readl(hsotg->regs + epctrl_reg));

+

+	/* enable the endpoint interrupt */

+	s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);

+

+out:

+	spin_unlock_irqrestore(&hs_ep->lock, flags);

+	return ret;

+}

+

+static int s3c_hsotg_ep_disable(struct usb_ep *ep)

+{

+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);

+	struct s3c_hsotg *hsotg = hs_ep->parent;

+	int dir_in = hs_ep->dir_in;

+	int index = hs_ep->index;

+	unsigned long flags;

+	u32 epctrl_reg;

+	u32 ctrl;

+

+	dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);

+

+	if (ep == &hsotg->eps[0].ep) {

+		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);

+		return -EINVAL;

+	}

+

+	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);

+

+	/* terminate all requests with shutdown */

+	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);

+

+	spin_lock_irqsave(&hs_ep->lock, flags);

+

+	ctrl = readl(hsotg->regs + epctrl_reg);

+	ctrl &= ~S3C_DxEPCTL_EPEna;

+	ctrl &= ~S3C_DxEPCTL_USBActEp;

+	ctrl |= S3C_DxEPCTL_SNAK;

+

+	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);

+	writel(ctrl, hsotg->regs + epctrl_reg);

+

+	/* disable endpoint interrupts */

+	s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);

+

+	spin_unlock_irqrestore(&hs_ep->lock, flags);

+	return 0;

+}

+

+/**

+ * on_list - check request is on the given endpoint

+ * @ep: The endpoint to check.

+ * @test: The request to test if it is on the endpoint.

+*/

+static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)

+{

+	struct s3c_hsotg_req *req, *treq;

+

+	list_for_each_entry_safe(req, treq, &ep->queue, queue) {

+		if (req == test)

+			return true;

+	}

+

+	return false;

+}

+

+static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)

+{

+	struct s3c_hsotg_req *hs_req = our_req(req);

+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);

+	struct s3c_hsotg *hs = hs_ep->parent;

+	unsigned long flags;

+

+	dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);

+

+	spin_lock_irqsave(&hs_ep->lock, flags);

+

+	if (!on_list(hs_ep, hs_req)) {

+		spin_unlock_irqrestore(&hs_ep->lock, flags);

+		return -EINVAL;

+	}

+

+	s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);

+	spin_unlock_irqrestore(&hs_ep->lock, flags);

+

+	return 0;

+}

+

+static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)

+{

+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);

+	struct s3c_hsotg *hs = hs_ep->parent;

+	int index = hs_ep->index;

+	unsigned long irqflags;

+	u32 epreg;

+	u32 epctl;

+	u32 xfertype;

+

+	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);

+

+	spin_lock_irqsave(&hs_ep->lock, irqflags);

+

+	/* write both IN and OUT control registers */

+

+	epreg = S3C_DIEPCTL(index);

+	epctl = readl(hs->regs + epreg);

+

+	if (value) {

+		epctl |= S3C_DxEPCTL_Stall + S3C_DxEPCTL_SNAK;

+		if (epctl & S3C_DxEPCTL_EPEna)

+			epctl |= S3C_DxEPCTL_EPDis;

+	} else {

+		epctl &= ~S3C_DxEPCTL_Stall;

+		xfertype = epctl & S3C_DxEPCTL_EPType_MASK;

+		if (xfertype == S3C_DxEPCTL_EPType_Bulk ||

+			xfertype == S3C_DxEPCTL_EPType_Intterupt)

+				epctl |= S3C_DxEPCTL_SetD0PID;

+	}

+

+	writel(epctl, hs->regs + epreg);

+

+	epreg = S3C_DOEPCTL(index);

+	epctl = readl(hs->regs + epreg);

+

+	if (value)

+		epctl |= S3C_DxEPCTL_Stall;

+	else {

+		epctl &= ~S3C_DxEPCTL_Stall;

+		xfertype = epctl & S3C_DxEPCTL_EPType_MASK;

+		if (xfertype == S3C_DxEPCTL_EPType_Bulk ||

+			xfertype == S3C_DxEPCTL_EPType_Intterupt)

+				epctl |= S3C_DxEPCTL_SetD0PID;

+	}

+

+	writel(epctl, hs->regs + epreg);

+

+	spin_unlock_irqrestore(&hs_ep->lock, irqflags);

+

+	return 0;

+}

+

+static struct usb_ep_ops s3c_hsotg_ep_ops = {

+	.enable		= s3c_hsotg_ep_enable,

+	.disable	= s3c_hsotg_ep_disable,

+	.alloc_request	= s3c_hsotg_ep_alloc_request,

+	.free_request	= s3c_hsotg_ep_free_request,

+	.queue		= s3c_hsotg_ep_queue,

+	.dequeue	= s3c_hsotg_ep_dequeue,

+	.set_halt	= s3c_hsotg_ep_sethalt,

+	/* note, don't believe we have any call for the fifo routines */

+};

+

+/**

+ * s3c_hsotg_corereset - issue softreset to the core

+ * @hsotg: The device state

+ *

+ * Issue a soft reset to the core, and await the core finishing it.

+*/

+static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)

+{

+	int timeout;

+	u32 grstctl;

+

+	dev_info(hsotg->dev, "resetting core, regs %p\n", hsotg->regs);

+

+	/* issue soft reset */

+	writel(S3C_GRSTCTL_CSftRst, hsotg->regs + S3C_GRSTCTL);

+

+	timeout = 1000;

+	do {

+		grstctl = readl(hsotg->regs + S3C_GRSTCTL);

+	} while ((grstctl & S3C_GRSTCTL_CSftRst) && timeout-- > 0);

+

+	if (grstctl & S3C_GRSTCTL_CSftRst) {

+		dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");

+		return -EINVAL;

+	}

+

+	timeout = 1000;

+

+	while (1) {

+		u32 grstctl = readl(hsotg->regs + S3C_GRSTCTL);

+

+		if (timeout-- < 0) {

+			dev_info(hsotg->dev,

+				 "%s: reset failed, GRSTCTL=%08x\n",

+				 __func__, grstctl);

+			return -ETIMEDOUT;

+		}

+

+		if (!(grstctl & S3C_GRSTCTL_AHBIdle))

+			continue;

+

+		break;		/* reset done */

+	}

+

+	dev_dbg(hsotg->dev, "reset successful\n");

+	return 0;

+}

+
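+/**
+ * dw2_udc_pullup - software controlled connect/disconnect
+ * @gadget: The usb gadget state (unused, the global device state is used)
+ * @value: Non-zero to connect to the bus, zero to disconnect
+ *
+ * Toggle the soft-disconnect (SftDiscon) bit in DCTL so the device can
+ * attach to or detach from the host under software control.
+ */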

+static void dw2_udc_pullup(struct usb_gadget *gadget, int value)
+{
+	struct s3c_hsotg *hsotg = our_hsotg;
+
+	dev_warn(hsotg->dev, "udc pullup, value %d\n", value);
+
+	if (!value)
+		__orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
+	else
+		__bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
+}

+

+static int s3c_hsotg_start(struct usb_gadget_driver *driver,

+		int (*bind)(struct usb_gadget *))

+{

+	struct s3c_hsotg *hsotg = our_hsotg;

+	int ret;

+

+	if (!hsotg) {

+		printk(KERN_ERR "%s: called with no device\n", __func__);

+		return -ENODEV;

+	}

+

+	if (!driver) {

+		dev_err(hsotg->dev, "%s: no driver\n", __func__);

+		return -EINVAL;

+	}

+

+	if (driver->max_speed < USB_SPEED_FULL)

+		dev_err(hsotg->dev, "%s: bad speed\n", __func__);

+

+	if (!bind || !driver->setup) {

+		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);

+		return -EINVAL;

+	}

+

+	WARN_ON(hsotg->driver);

+

+	driver->driver.bus = NULL;

+	hsotg->driver = driver;

+	hsotg->gadget.dev.driver = &driver->driver;

+	hsotg->gadget.dev.dma_mask = hsotg->dev->dma_mask;

+	hsotg->gadget.speed = USB_SPEED_UNKNOWN;

+

+	ret = device_add(&hsotg->gadget.dev);

+	if (ret) {

+		dev_err(hsotg->dev, "failed to register gadget device\n");

+		goto err;

+	}

+

+	ret = bind(&hsotg->gadget);

+	if (ret) {

+		dev_err(hsotg->dev, "failed bind %s\n", driver->driver.name);

+

+		hsotg->gadget.dev.driver = NULL;

+		hsotg->driver = NULL;

+		goto err;

+	}

+

+	/* we must now enable ep0 ready for host detection and then

+	 * set configuration. */

+

+	s3c_hsotg_corereset(hsotg);

+

+	/* set the PLL on, remove the HNP/SRP and set the PHY */
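+	/* note: the bare (0x5 << 10) value is assumed to be the USB turnaround time (USBTrdTim) field for the 16-bit PHY interface */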

+	writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) |

+	       (0x5 << 10), hsotg->regs + S3C_GUSBCFG);

+

+	/* looks like soft-reset changes state of FIFOs */

+	s3c_hsotg_init_fifo(hsotg);

+

+	__orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);

+

+	writel(1 << 18 | S3C_DCFG_DevSpd_HS,  hsotg->regs + S3C_DCFG);

+

+	/* Clear any pending OTG interrupts */

+	writel(0xffffffff, hsotg->regs + S3C_GOTGINT);

+

+	/* Clear any pending interrupts */

+	writel(0xffffffff, hsotg->regs + S3C_GINTSTS);

+

+	writel(S3C_GINTSTS_DisconnInt | S3C_GINTSTS_SessReqInt |

+	       S3C_GINTSTS_ConIDStsChng | S3C_GINTSTS_USBRst |

+	       S3C_GINTSTS_EnumDone | S3C_GINTSTS_OTGInt |

+	       S3C_GINTSTS_USBSusp | S3C_GINTSTS_WkUpInt |

+	       S3C_GINTSTS_GOUTNakEff | S3C_GINTSTS_GINNakEff |

+	       S3C_GINTSTS_ErlySusp,

+	       hsotg->regs + S3C_GINTMSK);

+

+	if (using_dma(hsotg))

+		writel(S3C_GAHBCFG_GlblIntrEn | S3C_GAHBCFG_DMAEn |

+		       S3C_GAHBCFG_HBstLen_Incr4,

+		       hsotg->regs + S3C_GAHBCFG);

+	else

+		writel(S3C_GAHBCFG_GlblIntrEn, hsotg->regs + S3C_GAHBCFG);

+

+	/* Enabling INTknTXFEmpMsk here seems to be a big mistake, we end

+	 * up being flooded with interrupts if the host is polling the

+	 * endpoint to try and read data. */

+

+	writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |

+	       S3C_DIEPMSK_INTknEPMisMsk |

+	       S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk |

+	       ((hsotg->dedicated_fifos) ? S3C_DIEPMSK_TxFIFOEmpty : 0),

+	       hsotg->regs + S3C_DIEPMSK);

+

+	/* don't need XferCompl, we get that from RXFIFO in slave mode. In

+	 * DMA mode we may need this. */

+	writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |

+	       S3C_DOEPMSK_EPDisbldMsk |

+	       (using_dma(hsotg) ? (S3C_DIEPMSK_XferComplMsk |

+				   S3C_DIEPMSK_TimeOUTMsk) : 0),

+	       hsotg->regs + S3C_DOEPMSK);

+

+	writel(0, hsotg->regs + S3C_DAINTMSK);

+

+	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",

+		readl(hsotg->regs + S3C_DIEPCTL0),

+		readl(hsotg->regs + S3C_DOEPCTL0));

+

+	/* enable in and out endpoint interrupts */

+	s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt);

+

+	/* Enable the RXFIFO when in slave mode, as this is how we collect

+	 * the data. In DMA mode, we get events from the FIFO but also

+	 * things we cannot process, so do not use it. */

+	if (!using_dma(hsotg))

+		s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_RxFLvl);

+

+	/* Enable interrupts for EP0 in and out */

+	s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);

+	s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);

+

+	__orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);

+	udelay(10);  /* see openiboot */

+	__bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);

+

+	dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL));

+

+	/* S3C_DxEPCTL_USBActEp says RO in manual, but seems to be set by

+	   writing to the EPCTL register.. */

+

+	/* set to read 1 8byte packet */

+	writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |

+	       S3C_DxEPTSIZ_XferSize(8), hsotg->regs + DOEPTSIZ0);

+

+	writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |

+	       S3C_DxEPCTL_CNAK | S3C_DxEPCTL_EPEna |

+	       S3C_DxEPCTL_USBActEp,

+	       hsotg->regs + S3C_DOEPCTL0);

+

+	/* enable, but don't activate EP0in */

+	writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |

+	       S3C_DxEPCTL_USBActEp, hsotg->regs + S3C_DIEPCTL0);

+

+	s3c_hsotg_enqueue_setup(hsotg);

+

+	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",

+		readl(hsotg->regs + S3C_DIEPCTL0),

+		readl(hsotg->regs + S3C_DOEPCTL0));

+

+	/* clear global NAKs */

+	writel(S3C_DCTL_CGOUTNak | S3C_DCTL_CGNPInNAK,

+	       hsotg->regs + S3C_DCTL);

+

+	/* must be at-least 3ms to allow bus to see disconnect */

+	msleep(3);

+

+	/* keep the soft-disconnect asserted for now; the gadget core's
+	 * pullup() call (dw2_udc_pullup) clears it once the function
+	 * driver is ready to be enumerated.
+	 */
+	__orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);

+

+	/* report to the user, and return */

+

+	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);

+	return 0;

+

+err:

+	hsotg->driver = NULL;

+	hsotg->gadget.dev.driver = NULL;

+	return ret;

+}

+

+static int s3c_hsotg_stop(struct usb_gadget_driver *driver)

+{

+	struct s3c_hsotg *hsotg = our_hsotg;

+	int ep;

+

+	if (!hsotg)

+		return -ENODEV;

+

+	if (!driver || driver != hsotg->driver || !driver->unbind)

+		return -EINVAL;

+

+	/* all endpoints should be shutdown */

+	for (ep = 0; ep < S3C_HSOTG_EPS; ep++)

+		s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);

+

+	call_gadget(hsotg, disconnect);

+

+	driver->unbind(&hsotg->gadget);

+	hsotg->driver = NULL;

+	hsotg->gadget.speed = USB_SPEED_UNKNOWN;

+

+	device_del(&hsotg->gadget.dev);

+	__orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);

+

+	dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",

+		 driver->driver.name);

+

+	return 0;

+}

+

+static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)

+{

+	return s3c_hsotg_read_frameno(to_hsotg(gadget));

+}

+

+static struct usb_gadget_ops s3c_hsotg_gadget_ops = {

+	.get_frame	= s3c_hsotg_gadget_getframe,

+	.start		= s3c_hsotg_start,

+	.stop		= s3c_hsotg_stop,

+	.pullup		= dw2_udc_pullup,

+};

+

+/**

+ * s3c_hsotg_initep - initialise a single endpoint

+ * @hsotg: The device state.

+ * @hs_ep: The endpoint to be initialised.

+ * @epnum: The endpoint number

+ *

+ * Initialise the given endpoint (as part of the probe and device state

+ * creation) to give to the gadget driver. Setup the endpoint name, any

+ * direction information and other state that may be required.

+ */

+static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg,

+				       struct s3c_hsotg_ep *hs_ep,

+				       int epnum)

+{

+	u32 ptxfifo;

+	char *dir;

+

+	if (epnum == 0)

+		dir = "";

+	else if ((epnum % 2) == 0) {

+		dir = "out";

+	} else {

+		dir = "in";

+		hs_ep->dir_in = 1;

+	}

+

+	hs_ep->index = epnum;

+

+	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);

+

+	INIT_LIST_HEAD(&hs_ep->queue);

+	INIT_LIST_HEAD(&hs_ep->ep.ep_list);

+

+	spin_lock_init(&hs_ep->lock);

+

+	/* add to the list of endpoints known by the gadget driver */

+	if (epnum)

+		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);

+

+	hs_ep->parent = hsotg;

+	hs_ep->ep.name = hs_ep->name;

+	hs_ep->ep.maxpacket = epnum ? 512 : EP0_MPS_LIMIT;

+	hs_ep->ep.ops = &s3c_hsotg_ep_ops;

+

+	/* Read the FIFO size for the Periodic TX FIFO, even if we're

+	 * an OUT endpoint, we may as well do this if in future the

+	 * code is changed to make each endpoint's direction changeable.

+	 */

+

+	ptxfifo = readl(hsotg->regs + S3C_DPTXFSIZn(epnum));

+	hs_ep->fifo_size = S3C_DPTXFSIZn_DPTxFSize_GET(ptxfifo) * 4;

+

+	/* if we're using dma, we need to set the next-endpoint pointer

+	 * to be something valid.

+	 */

+

+	if (using_dma(hsotg)) {

+		u32 next = S3C_DxEPCTL_NextEp((epnum + 1) % 15);

+		writel(next, hsotg->regs + S3C_DIEPCTL(epnum));

+		writel(next, hsotg->regs + S3C_DOEPCTL(epnum));

+	}

+}

+

+/**

+ * s3c_hsotg_otgreset - reset the OtG phy block

+ * @hsotg: The host state.

+ *

+ * Power up the phy, set the basic configuration and start the PHY.

+ */

+static void s3c_hsotg_otgreset(struct s3c_hsotg *hsotg)

+{

+

+#if 0

+	struct clk *xusbxti;

+	u32 pwr, osc;

+

+	pwr = readl(S3C_PHYPWR);

+	pwr &= ~0x19;

+	writel(pwr, S3C_PHYPWR);

+	mdelay(1);

+

+	osc = hsotg->plat->is_osc ? S3C_PHYCLK_EXT_OSC : 0;

+

+	xusbxti = clk_get(hsotg->dev, "xusbxti");

+	if (xusbxti && !IS_ERR(xusbxti)) {

+		switch (clk_get_rate(xusbxti)) {

+		case 12*MHZ:

+			osc |= S3C_PHYCLK_CLKSEL_12M;

+			break;

+		case 24*MHZ:

+			osc |= S3C_PHYCLK_CLKSEL_24M;

+			break;

+		default:

+		case 48*MHZ:

+			/* default reference clock */

+			break;

+		}

+		clk_put(xusbxti);

+	}

+

+	writel(osc | 0x10, S3C_PHYCLK);

+

+	/* issue a full set of resets to the otg and core */

+

+	writel(S3C_RSTCON_PHY, S3C_RSTCON);

+	udelay(20);	/* at-least 10uS */

+	writel(0, S3C_RSTCON);

+#else
+	struct clk *xusbphy;
+
+	xusbphy = clk_get(hsotg->dev, "work_clk");
+	if (IS_ERR(xusbphy)) {
+		dev_warn(hsotg->dev, "cannot get otg phy clock\n");
+		return;
+	}
+
+	clk_enable(xusbphy);
+
+	udelay(20);
+#endif

+}

+

+

+static void s3c_hsotg_init(struct s3c_hsotg *hsotg)

+{

+	u32 cfg4;

+

+	/* unmask subset of endpoint interrupts */

+

+	writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |

+	       S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk,

+	       hsotg->regs + S3C_DIEPMSK);

+

+	writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |

+	       S3C_DOEPMSK_EPDisbldMsk | S3C_DOEPMSK_XferComplMsk,

+	       hsotg->regs + S3C_DOEPMSK);

+

+	writel(0, hsotg->regs + S3C_DAINTMSK);

+

+	/* Be in disconnected state until gadget is registered */

+	__orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);

+

+	if (0) {

+		/* post global nak until we're ready */

+		writel(S3C_DCTL_SGNPInNAK | S3C_DCTL_SGOUTNak,

+		       hsotg->regs + S3C_DCTL);

+	}

+

+	/* setup fifos */

+

+	dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",

+		readl(hsotg->regs + S3C_GRXFSIZ),

+		readl(hsotg->regs + S3C_GNPTXFSIZ));

+

+	s3c_hsotg_init_fifo(hsotg);

+

+	/* set the PLL on, remove the HNP/SRP and set the PHY */

+	writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) | (0x5 << 10),

+	       hsotg->regs + S3C_GUSBCFG);

+

+	writel(using_dma(hsotg) ? S3C_GAHBCFG_DMAEn : 0x0,

+	       hsotg->regs + S3C_GAHBCFG);

+

+	/* check hardware configuration */

+

+	cfg4 = readl(hsotg->regs + 0x50);

+	hsotg->dedicated_fifos = (cfg4 >> 25) & 1;

+

+	dev_info(hsotg->dev, "%s fifos\n",

+		 hsotg->dedicated_fifos ? "dedicated" : "shared");

+}

+

+static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)

+{

+#ifdef DEBUG

+	struct device *dev = hsotg->dev;

+	void __iomem *regs = hsotg->regs;

+	u32 val;

+	int idx;

+

+	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",

+		 readl(regs + S3C_DCFG), readl(regs + S3C_DCTL),

+		 readl(regs + S3C_DIEPMSK));

+

+	dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",

+		 readl(regs + S3C_GAHBCFG), readl(regs + 0x44));

+

+	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",

+		 readl(regs + S3C_GRXFSIZ), readl(regs + S3C_GNPTXFSIZ));

+

+	/* show periodic fifo settings */

+

+	for (idx = 1; idx <= 15; idx++) {

+		val = readl(regs + S3C_DPTXFSIZn(idx));

+		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,

+			 val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,

+			 val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);

+	}

+

+	for (idx = 0; idx < 15; idx++) {

+		dev_info(dev,

+			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,

+			 readl(regs + S3C_DIEPCTL(idx)),

+			 readl(regs + S3C_DIEPTSIZ(idx)),

+			 readl(regs + S3C_DIEPDMA(idx)));

+

+		val = readl(regs + S3C_DOEPCTL(idx));

+		dev_info(dev,

+			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",

+			 idx, readl(regs + S3C_DOEPCTL(idx)),

+			 readl(regs + S3C_DOEPTSIZ(idx)),

+			 readl(regs + S3C_DOEPDMA(idx)));

+

+	}

+

+	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",

+		 readl(regs + S3C_DVBUSDIS), readl(regs + S3C_DVBUSPULSE));

+#endif

+}

+

+

+/**

+ * state_show - debugfs: show overall driver and device state.

+ * @seq: The seq file to write to.

+ * @v: Unused parameter.

+ *

+ * This debugfs entry shows the overall state of the hardware and

+ * some general information about each of the endpoints available

+ * to the system.

+ */

+static int state_show(struct seq_file *seq, void *v)

+{

+	struct s3c_hsotg *hsotg = seq->private;

+	void __iomem *regs = hsotg->regs;

+	int idx;

+

+	seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",

+		 readl(regs + S3C_DCFG),

+		 readl(regs + S3C_DCTL),

+		 readl(regs + S3C_DSTS));

+

+	seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",

+		   readl(regs + S3C_DIEPMSK), readl(regs + S3C_DOEPMSK));

+

+	seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",

+		   readl(regs + S3C_GINTMSK),

+		   readl(regs + S3C_GINTSTS));

+

+	seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",

+		   readl(regs + S3C_DAINTMSK),

+		   readl(regs + S3C_DAINT));

+

+	seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",

+		   readl(regs + S3C_GNPTXSTS),

+		   readl(regs + S3C_GRXSTSR));

+

+	seq_printf(seq, "\nEndpoint status:\n");

+

+	for (idx = 0; idx < 15; idx++) {

+		u32 in, out;

+

+		in = readl(regs + S3C_DIEPCTL(idx));

+		out = readl(regs + S3C_DOEPCTL(idx));

+

+		seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",

+			   idx, in, out);

+

+		in = readl(regs + S3C_DIEPTSIZ(idx));

+		out = readl(regs + S3C_DOEPTSIZ(idx));

+

+		seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",

+			   in, out);

+

+		seq_printf(seq, "\n");

+	}

+

+	return 0;

+}

+

+static int state_open(struct inode *inode, struct file *file)

+{

+	return single_open(file, state_show, inode->i_private);

+}

+

+static const struct file_operations state_fops = {

+	.owner		= THIS_MODULE,

+	.open		= state_open,

+	.read		= seq_read,

+	.llseek		= seq_lseek,

+	.release	= single_release,

+};

+

+/**

+ * fifo_show - debugfs: show the fifo information

+ * @seq: The seq_file to write data to.

+ * @v: Unused parameter.

+ *

+ * Show the FIFO information for the overall fifo and all the

+ * periodic transmission FIFOs.

+*/

+static int fifo_show(struct seq_file *seq, void *v)

+{

+	struct s3c_hsotg *hsotg = seq->private;

+	void __iomem *regs = hsotg->regs;

+	u32 val;

+	int idx;

+

+	seq_printf(seq, "Non-periodic FIFOs:\n");

+	seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + S3C_GRXFSIZ));

+

+	val = readl(regs + S3C_GNPTXFSIZ);

+	seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",

+		   val >> S3C_GNPTXFSIZ_NPTxFDep_SHIFT,

+		   val & S3C_GNPTXFSIZ_NPTxFStAddr_MASK);

+

+	seq_printf(seq, "\nPeriodic TXFIFOs:\n");

+

+	for (idx = 1; idx <= 15; idx++) {

+		val = readl(regs + S3C_DPTXFSIZn(idx));

+

+		seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,

+			   val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,

+			   val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);

+	}

+

+	return 0;

+}

+

+static int fifo_open(struct inode *inode, struct file *file)

+{

+	return single_open(file, fifo_show, inode->i_private);

+}

+

+static const struct file_operations fifo_fops = {

+	.owner		= THIS_MODULE,

+	.open		= fifo_open,

+	.read		= seq_read,

+	.llseek		= seq_lseek,

+	.release	= single_release,

+};

+

+

+static const char *decode_direction(int is_in)

+{

+	return is_in ? "in" : "out";

+}

+

+/**

+ * ep_show - debugfs: show the state of an endpoint.

+ * @seq: The seq_file to write data to.

+ * @v: Unused parameter.

+ *

+ * This debugfs entry shows the state of the given endpoint (one is

+ * registered for each available).

+*/

+static int ep_show(struct seq_file *seq, void *v)

+{

+	struct s3c_hsotg_ep *ep = seq->private;

+	struct s3c_hsotg *hsotg = ep->parent;

+	struct s3c_hsotg_req *req;

+	void __iomem *regs = hsotg->regs;

+	int index = ep->index;

+	int show_limit = 15;

+	unsigned long flags;

+

+	seq_printf(seq, "Endpoint index %d, named %s,  dir %s:\n",

+		   ep->index, ep->ep.name, decode_direction(ep->dir_in));

+

+	/* first show the register state */

+

+	seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",

+		   readl(regs + S3C_DIEPCTL(index)),

+		   readl(regs + S3C_DOEPCTL(index)));

+

+	seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",

+		   readl(regs + S3C_DIEPDMA(index)),

+		   readl(regs + S3C_DOEPDMA(index)));

+

+	seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",

+		   readl(regs + S3C_DIEPINT(index)),

+		   readl(regs + S3C_DOEPINT(index)));

+

+	seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",

+		   readl(regs + S3C_DIEPTSIZ(index)),

+		   readl(regs + S3C_DOEPTSIZ(index)));

+

+	seq_printf(seq, "\n");

+	seq_printf(seq, "mps %d\n", ep->ep.maxpacket);

+	seq_printf(seq, "total_data=%ld\n", ep->total_data);

+

+	seq_printf(seq, "request list (%p,%p):\n",

+		   ep->queue.next, ep->queue.prev);

+

+	spin_lock_irqsave(&ep->lock, flags);

+

+	list_for_each_entry(req, &ep->queue, queue) {

+		if (--show_limit < 0) {

+			seq_printf(seq, "not showing more requests...\n");

+			break;

+		}

+

+		seq_printf(seq, "%c req %p: %d bytes @%p, ",

+			   req == ep->req ? '*' : ' ',

+			   req, req->req.length, req->req.buf);

+		seq_printf(seq, "%d done, res %d\n",

+			   req->req.actual, req->req.status);

+	}

+

+	spin_unlock_irqrestore(&ep->lock, flags);

+

+	return 0;

+}

+

+static int ep_open(struct inode *inode, struct file *file)

+{

+	return single_open(file, ep_show, inode->i_private);

+}

+

+static const struct file_operations ep_fops = {

+	.owner		= THIS_MODULE,

+	.open		= ep_open,

+	.read		= seq_read,

+	.llseek		= seq_lseek,

+	.release	= single_release,

+};

+

+/**

+ * s3c_hsotg_create_debug - create debugfs directory and files

+ * @hsotg: The driver state

+ *

+ * Create the debugfs files to allow the user to get information

+ * about the state of the system. The directory name is created

+ * with the same name as the device itself, in case we end up

+ * with multiple blocks in future systems.

+*/

+static void __devinit s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)

+{

+	struct dentry *root;

+	unsigned epidx;

+

+	root = debugfs_create_dir(dev_name(hsotg->dev), NULL);

+	hsotg->debug_root = root;

+	if (IS_ERR(root)) {

+		dev_err(hsotg->dev, "cannot create debug root\n");

+		return;

+	}

+

+	/* create general state file */

+

+	hsotg->debug_file = debugfs_create_file("state", 0444, root,

+						hsotg, &state_fops);

+

+	if (IS_ERR(hsotg->debug_file))

+		dev_err(hsotg->dev, "%s: failed to create state\n", __func__);

+

+	hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,

+						hsotg, &fifo_fops);

+

+	if (IS_ERR(hsotg->debug_fifo))

+		dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);

+

+	/* create one file for each endpoint */

+

+	for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {

+		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];

+

+		ep->debugfs = debugfs_create_file(ep->name, 0444,

+						  root, ep, &ep_fops);

+

+		if (IS_ERR(ep->debugfs))

+			dev_err(hsotg->dev, "failed to create %s debug file\n",

+				ep->name);

+	}

+}

+

+/**

+ * s3c_hsotg_delete_debug - cleanup debugfs entries

+ * @hsotg: The driver state

+ *

+ * Cleanup (remove) the debugfs files for use on module exit.

+*/

+static void __devexit s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)

+{

+	unsigned epidx;

+

+	for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {

+		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];

+		debugfs_remove(ep->debugfs);

+	}

+

+	debugfs_remove(hsotg->debug_file);

+	debugfs_remove(hsotg->debug_fifo);

+	debugfs_remove(hsotg->debug_root);

+}

+

+/**

+ * s3c_hsotg_gate - set the hardware gate for the block

+ * @pdev: The device we bound to

+ * @on: On or off.

+ *

+ * Set the hardware gate setting into the block. If we end up on

+ * something other than an S3C64XX, then we might need to change this

+ * to using a platform data callback, or some other mechanism.

+ */

+static void s3c_hsotg_gate(struct platform_device *pdev, bool on)

+{

+#if 0
+	unsigned long flags;
+	u32 others;
+
+	local_irq_save(flags);

+

+	others = __raw_readl(S3C64XX_OTHERS);

+	if (on)

+		others |= S3C64XX_OTHERS_USBMASK;

+	else

+		others &= ~S3C64XX_OTHERS_USBMASK;

+	__raw_writel(others, S3C64XX_OTHERS);

+

+	local_irq_restore(flags);

+#endif

+}

+

+static struct s3c_hsotg_plat s3c_hsotg_default_pdata;

+

+static int __devinit s3c_hsotg_probe(struct platform_device *pdev)

+{

+	struct s3c_hsotg_plat *plat = pdev->dev.platform_data;

+	struct device *dev = &pdev->dev;

+	struct s3c_hsotg *hsotg;

+	struct resource *res;

+	int epnum;

+	int ret;

+

+	printk(KERN_WARNING "\n####### USB1 PROBE###################\n");

+

+	if (!plat)

+		plat = &s3c_hsotg_default_pdata;

+

+	hsotg = kzalloc(sizeof(struct s3c_hsotg) +

+			sizeof(struct s3c_hsotg_ep) * S3C_HSOTG_EPS,

+			GFP_KERNEL);

+	if (!hsotg) {

+		dev_err(dev, "cannot get memory\n");

+		return -ENOMEM;

+	}

+

+	hsotg->dev = dev;

+	hsotg->plat = plat;

+

+	hsotg->clk = clk_get(&pdev->dev, "ahb_clk");

+	if (IS_ERR(hsotg->clk)) {

+		dev_err(dev, "cannot get otg clock\n");

+		ret = PTR_ERR(hsotg->clk);

+		goto err_mem;

+	}

+

+	platform_set_drvdata(pdev, hsotg);

+

+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

+	if (!res) {

+		dev_err(dev, "cannot find register resource 0\n");

+		ret = -EINVAL;

+		goto err_clk;

+	}

+

+	hsotg->regs_res = request_mem_region(res->start, resource_size(res),

+					     dev_name(dev));

+	if (!hsotg->regs_res) {

+		dev_err(dev, "cannot reserve registers\n");

+		ret = -ENOENT;

+		goto err_clk;

+	}

+

+	hsotg->regs = ioremap(res->start, resource_size(res));

+	if (!hsotg->regs) {

+		dev_err(dev, "cannot map registers\n");

+		ret = -ENXIO;

+		goto err_regs_res;

+	}

+

+	ret = platform_get_irq(pdev, 0);

+	if (ret < 0) {

+		dev_err(dev, "cannot find IRQ\n");

+		goto err_regs;

+	}

+

+	hsotg->irq = ret;

+

+	ret = request_irq(ret, s3c_hsotg_irq, 0, dev_name(dev), hsotg);

+	if (ret < 0) {

+		dev_err(dev, "cannot claim IRQ\n");

+		goto err_regs;

+	}

+

+	dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);

+

+	device_initialize(&hsotg->gadget.dev);

+

+	dev_set_name(&hsotg->gadget.dev, "dw_gadget");

+

+	hsotg->gadget.max_speed = USB_SPEED_HIGH;

+	hsotg->gadget.ops = &s3c_hsotg_gadget_ops;

+	hsotg->gadget.name = dev_name(dev);

+

+	hsotg->gadget.dev.parent = dev;

+	hsotg->gadget.dev.dma_mask = dev->dma_mask;

+

+	/* setup endpoint information */

+

+	INIT_LIST_HEAD(&hsotg->gadget.ep_list);

+	hsotg->gadget.ep0 = &hsotg->eps[0].ep;

+

+	/* allocate EP0 request */

+

+	hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,

+						     GFP_KERNEL);

+	if (!hsotg->ctrl_req) {
+		dev_err(dev, "failed to allocate ctrl req\n");
+		ret = -ENOMEM;
+		goto err_regs;
+	}

+

+	/* reset the system */

+

+	clk_enable(hsotg->clk);

+

+	s3c_hsotg_gate(pdev, true);

+

+	s3c_hsotg_otgreset(hsotg);

+	s3c_hsotg_corereset(hsotg);

+	s3c_hsotg_init(hsotg);

+

+	/* initialise the endpoints now the core has been initialised */

+	for (epnum = 0; epnum < S3C_HSOTG_EPS; epnum++)

+		s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);

+

+	ret = usb_add_gadget_udc(&pdev->dev, &hsotg->gadget);

+	if (ret)

+		goto err_add_udc;

+

+	s3c_hsotg_create_debug(hsotg);

+

+	s3c_hsotg_dump(hsotg);

+

+	our_hsotg = hsotg;

+	return 0;

+

+err_add_udc:

+	s3c_hsotg_gate(pdev, false);

+	clk_disable(hsotg->clk);

+	clk_put(hsotg->clk);

+

+err_regs:

+	iounmap(hsotg->regs);

+

+err_regs_res:

+	release_resource(hsotg->regs_res);

+	kfree(hsotg->regs_res);

+err_clk:

+	clk_put(hsotg->clk);

+err_mem:

+	kfree(hsotg);

+	return ret;

+}

+

+static int __devexit s3c_hsotg_remove(struct platform_device *pdev)

+{

+	struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);

+

+	usb_del_gadget_udc(&hsotg->gadget);

+

+	s3c_hsotg_delete_debug(hsotg);

+

+	usb_gadget_unregister_driver(hsotg->driver);

+

+	free_irq(hsotg->irq, hsotg);

+	iounmap(hsotg->regs);

+

+	release_resource(hsotg->regs_res);

+	kfree(hsotg->regs_res);

+

+	s3c_hsotg_gate(pdev, false);

+

+	clk_disable(hsotg->clk);

+	clk_put(hsotg->clk);

+

+	kfree(hsotg);

+	return 0;

+}

+

+/* no suspend/resume support yet */
+#define s3c_hsotg_suspend NULL
+#define s3c_hsotg_resume NULL

+

+static struct platform_driver s3c_hsotg_driver = {

+	.driver		= {

+		.name	= "zx297510_hsotg",

+		.owner	= THIS_MODULE,

+	},

+	.probe		= s3c_hsotg_probe,

+	.remove		= __devexit_p(s3c_hsotg_remove),

+	.suspend	= s3c_hsotg_suspend,

+	.resume		= s3c_hsotg_resume,

+};

+

+module_platform_driver(s3c_hsotg_driver);

+

+MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");

+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");

+MODULE_LICENSE("GPL");

+MODULE_ALIAS("platform:s3c-hsotg");

+

diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/epautoconf.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/epautoconf.c
new file mode 100644
index 0000000..bec42c5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/epautoconf.c
@@ -0,0 +1,399 @@
+/*
+ * epautoconf.c -- endpoint autoconfiguration for usb gadget drivers
+ *
+ * Copyright (C) 2004 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "gadget_chips.h"
+
+
+/* we must assign addresses for configurable endpoints (like net2280) */
+static unsigned epnum;
+
+// #define MANY_ENDPOINTS
+#ifdef MANY_ENDPOINTS
+/* more than 15 configurable endpoints */
+static unsigned in_epnum;
+#endif
+
+
+/*
+ * This should work with endpoints from controller drivers sharing the
+ * same endpoint naming convention.  By example:
+ *
+ *	- ep1, ep2, ... address is fixed, not direction or type
+ *	- ep1in, ep2out, ... address and direction are fixed, not type
+ *	- ep1-bulk, ep2-bulk, ... address and type are fixed, not direction
+ *	- ep1in-bulk, ep2out-iso, ... all three are fixed
+ *	- ep-* ... no functionality restrictions
+ *
+ * Type suffixes are "-bulk", "-iso", or "-int".  Numbers are decimal.
+ * Less common restrictions are implied by gadget_is_*().
+ *
+ * NOTE:  each endpoint is unidirectional, as specified by its USB
+ * descriptor; and isn't specific to a configuration or altsetting.
+ */
+static int
+ep_matches (
+	struct usb_gadget		*gadget,
+	struct usb_ep			*ep,
+	struct usb_endpoint_descriptor	*desc,
+	struct usb_ss_ep_comp_descriptor *ep_comp
+)
+{
+	u8		type;
+	const char	*tmp;
+	u16		max;
+
+	int		num_req_streams = 0;
+
+	/* endpoint already claimed? */
+	if (NULL != ep->driver_data)
+		return 0;
+
+	/* only support ep0 for portable CONTROL traffic */
+	type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+	if (USB_ENDPOINT_XFER_CONTROL == type)
+		return 0;
+
+	/* some other naming convention */
+	if ('e' != ep->name[0])
+		return 0;
+
+	/* type-restriction:  "-iso", "-bulk", or "-int".
+	 * direction-restriction:  "in", "out".
+	 */
+	if ('-' != ep->name[2]) {
+		tmp = strrchr (ep->name, '-');
+		if (tmp) {
+			switch (type) {
+			case USB_ENDPOINT_XFER_INT:
+				/* bulk endpoints handle interrupt transfers,
+				 * except the toggle-quirky iso-synch kind
+				 */
+				if ('s' == tmp[2])	// == "-iso"
+					return 0;
+				/* for now, avoid PXA "interrupt-in";
+				 * it's documented as never using DATA1.
+				 */
+				if (gadget_is_pxa (gadget)
+						&& 'i' == tmp [1])
+					return 0;
+				break;
+			case USB_ENDPOINT_XFER_BULK:
+				if ('b' != tmp[1])	// != "-bulk"
+					return 0;
+				break;
+			case USB_ENDPOINT_XFER_ISOC:
+				if ('s' != tmp[2])	// != "-iso"
+					return 0;
+			}
+		} else {
+			tmp = ep->name + strlen (ep->name);
+		}
+
+		/* direction-restriction:  "..in-..", "out-.." */
+		tmp--;
+		if (!isdigit (*tmp)) {
+			if (desc->bEndpointAddress & USB_DIR_IN) {
+				if ('n' != *tmp)
+					return 0;
+			} else {
+				if ('t' != *tmp)
+					return 0;
+			}
+		}
+	}
+
+	/*
+	 * Get the number of required streams from the EP companion
+	 * descriptor and see if the EP matches it
+	 */
+	if (usb_endpoint_xfer_bulk(desc)) {
+		if (ep_comp && gadget->max_speed >= USB_SPEED_SUPER) {
+			num_req_streams = ep_comp->bmAttributes & 0x1f;
+			if (num_req_streams > ep->max_streams)
+				return 0;
+		}
+
+	}
+
+	/*
+	 * If the protocol driver hasn't yet decided on wMaxPacketSize
+	 * and wants to know the maximum possible, provide the info.
+	 */
+	if (desc->wMaxPacketSize == 0)
+		desc->wMaxPacketSize = cpu_to_le16(ep->maxpacket);
+
+	/* endpoint maxpacket size is an input parameter, except for bulk
+	 * where it's an output parameter representing the full speed limit.
+	 * the usb spec fixes high speed bulk maxpacket at 512 bytes.
+	 */
+	max = 0x7ff & usb_endpoint_maxp(desc);
+	switch (type) {
+	case USB_ENDPOINT_XFER_INT:
+		/* INT:  limit 64 bytes full speed, 1024 high/super speed */
+		if (!gadget_is_dualspeed(gadget) && max > 64)
+			return 0;
+		/* FALLTHROUGH */
+
+	case USB_ENDPOINT_XFER_ISOC:
+		/* ISO:  limit 1023 bytes full speed, 1024 high/super speed */
+		if (ep->maxpacket < max)
+			return 0;
+		if (!gadget_is_dualspeed(gadget) && max > 1023)
+			return 0;
+
+		/* BOTH:  "high bandwidth" works only at high speed */
+		if ((desc->wMaxPacketSize & cpu_to_le16(3<<11))) {
+			if (!gadget_is_dualspeed(gadget))
+				return 0;
+			/* configure your hardware with enough buffering!! */
+		}
+		break;
+	}
+
+	/* MATCH!! */
+
+	/* report address */
+	desc->bEndpointAddress &= USB_DIR_IN;
+	if (isdigit (ep->name [2])) {
+		u8	num = simple_strtoul (&ep->name [2], NULL, 10);
+		desc->bEndpointAddress |= num;
+#ifdef	MANY_ENDPOINTS
+	} else if (desc->bEndpointAddress & USB_DIR_IN) {
+		if (++in_epnum > 15)
+			return 0;
+		desc->bEndpointAddress = USB_DIR_IN | in_epnum;
+#endif
+	} else {
+		if (++epnum > 15)
+			return 0;
+		desc->bEndpointAddress |= epnum;
+	}
+
+	/* report (variable) full speed bulk maxpacket */
+	if ((USB_ENDPOINT_XFER_BULK == type) && !ep_comp) {
+		int size = ep->maxpacket;
+
+		/* min() doesn't work on bitfields with gcc-3.5 */
+		if (size > 64)
+			size = 64;
+		desc->wMaxPacketSize = cpu_to_le16(size);
+	}
+	ep->address = desc->bEndpointAddress;
+	return 1;
+}
+
+static struct usb_ep *
+find_ep (struct usb_gadget *gadget, const char *name)
+{
+	struct usb_ep	*ep;
+
+	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+		if (0 == strcmp (ep->name, name))
+			return ep;
+	}
+	return NULL;
+}
+
+/**
+ * usb_ep_autoconfig_ss() - choose an endpoint matching the ep
+ * descriptor and ep companion descriptor
+ * @gadget: The device to which the endpoint must belong.
+ * @desc: Endpoint descriptor, with endpoint direction and transfer mode
+ *    initialized.  For periodic transfers, the maximum packet
+ *    size must also be initialized.  This is modified on
+ *    success.
+ * @ep_comp: Endpoint companion descriptor, with the required
+ *    number of streams. Will be modified when the chosen EP
+ *    supports a different number of streams.
+ *
+ * This routine replaces usb_ep_autoconfig() when superspeed
+ * enhancements are needed.  If such enhancements are required,
+ * the function driver should call usb_ep_autoconfig_ss() directly
+ * and provide the additional ep_comp parameter.
+ *
+ * By choosing an endpoint to use with the specified descriptor,
+ * this routine simplifies writing gadget drivers that work with
+ * multiple USB device controllers.  The endpoint would be
+ * passed later to usb_ep_enable(), along with some descriptor.
+ *
+ * That second descriptor won't always be the same as the first one.
+ * For example, isochronous endpoints can be autoconfigured for high
+ * bandwidth, and then used in several lower bandwidth altsettings.
+ * Also, high and full speed descriptors will be different.
+ *
+ * Be sure to examine and test the results of autoconfiguration
+ * on your hardware.  This code may not make the best choices
+ * about how to use the USB controller, and it can't know all
+ * the restrictions that may apply. Some combinations of driver
+ * and hardware won't be able to autoconfigure.
+ *
+ * On success, this returns an un-claimed usb_ep, and modifies the endpoint
+ * descriptor bEndpointAddress.  For bulk endpoints, the wMaxPacket value
+ * is initialized as if the endpoint were used at full speed and
+ * the bmAttribute field in the ep companion descriptor is
+ * updated with the assigned number of streams if it is
+ * different from the original value. To prevent the endpoint
+ * from being returned by a later autoconfig call, claim it by
+ * assigning ep->driver_data to some non-null value.
+ *
+ * On failure, this returns a null endpoint descriptor.
+ */
+struct usb_ep *usb_ep_autoconfig_ss(
+	struct usb_gadget		*gadget,
+	struct usb_endpoint_descriptor	*desc,
+	struct usb_ss_ep_comp_descriptor *ep_comp
+)
+{
+	struct usb_ep	*ep;
+	u8		type;
+
+	type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+	/* First, apply chip-specific "best usage" knowledge.
+	 * This might make a good usb_gadget_ops hook ...
+	 */
+	if (gadget_is_net2280 (gadget) && type == USB_ENDPOINT_XFER_INT) {
+		/* ep-e, ep-f are PIO with only 64 byte fifos */
+		ep = find_ep (gadget, "ep-e");
+		if (ep && ep_matches(gadget, ep, desc, ep_comp))
+			goto found_ep;
+		ep = find_ep (gadget, "ep-f");
+		if (ep && ep_matches(gadget, ep, desc, ep_comp))
+			goto found_ep;
+
+	} else if (gadget_is_goku (gadget)) {
+		if (USB_ENDPOINT_XFER_INT == type) {
+			/* single buffering is enough */
+			ep = find_ep(gadget, "ep3-bulk");
+			if (ep && ep_matches(gadget, ep, desc, ep_comp))
+				goto found_ep;
+		} else if (USB_ENDPOINT_XFER_BULK == type
+				&& (USB_DIR_IN & desc->bEndpointAddress)) {
+			/* DMA may be available */
+			ep = find_ep(gadget, "ep2-bulk");
+			if (ep && ep_matches(gadget, ep, desc,
+					      ep_comp))
+				goto found_ep;
+		}
+
+#ifdef CONFIG_BLACKFIN
+	} else if (gadget_is_musbhdrc(gadget)) {
+		if ((USB_ENDPOINT_XFER_BULK == type) ||
+		    (USB_ENDPOINT_XFER_ISOC == type)) {
+			if (USB_DIR_IN & desc->bEndpointAddress)
+				ep = find_ep (gadget, "ep5in");
+			else
+				ep = find_ep (gadget, "ep6out");
+		} else if (USB_ENDPOINT_XFER_INT == type) {
+			if (USB_DIR_IN & desc->bEndpointAddress)
+				ep = find_ep(gadget, "ep1in");
+			else
+				ep = find_ep(gadget, "ep2out");
+		} else
+			ep = NULL;
+		if (ep && ep_matches(gadget, ep, desc, ep_comp))
+			goto found_ep;
+#endif
+	} else if (gadget_is_dwc2(gadget) && type == USB_ENDPOINT_XFER_INT) {
+		/* On the 7520v2 DWC OTG controller, ep7 has only a 192-byte
+		 * FIFO in each direction, so hand this endpoint out first
+		 * for the network interface's interrupt endpoint.
+		 */
+		ep = find_ep(gadget, "ep7in");
+		if (ep && ep->driver_data == NULL &&
+		    ep_matches(gadget, ep, desc, ep_comp))
+			goto found_ep;
+	}
+
+	/* Second, look at endpoints until an unclaimed one looks usable */
+	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+		if (ep_matches(gadget, ep, desc, ep_comp))
+			goto found_ep;
+	}
+
+	/* Fail */
+	return NULL;
+found_ep:
+	ep->desc = NULL;
+	ep->comp_desc = NULL;
+	return ep;
+}
+
+/**
+ * usb_ep_autoconfig() - choose an endpoint matching the
+ * descriptor
+ * @gadget: The device to which the endpoint must belong.
+ * @desc: Endpoint descriptor, with endpoint direction and transfer mode
+ *	initialized.  For periodic transfers, the maximum packet
+ *	size must also be initialized.  This is modified on success.
+ *
+ * By choosing an endpoint to use with the specified descriptor, this
+ * routine simplifies writing gadget drivers that work with multiple
+ * USB device controllers.  The endpoint would be passed later to
+ * usb_ep_enable(), along with some descriptor.
+ *
+ * That second descriptor won't always be the same as the first one.
+ * For example, isochronous endpoints can be autoconfigured for high
+ * bandwidth, and then used in several lower bandwidth altsettings.
+ * Also, high and full speed descriptors will be different.
+ *
+ * Be sure to examine and test the results of autoconfiguration on your
+ * hardware.  This code may not make the best choices about how to use the
+ * USB controller, and it can't know all the restrictions that may apply.
+ * Some combinations of driver and hardware won't be able to autoconfigure.
+ *
+ * On success, this returns an un-claimed usb_ep, and modifies the endpoint
+ * descriptor bEndpointAddress.  For bulk endpoints, the wMaxPacket value
+ * is initialized as if the endpoint were used at full speed.  To prevent
+ * the endpoint from being returned by a later autoconfig call, claim it
+ * by assigning ep->driver_data to some non-null value.
+ *
+ * On failure, this returns a null endpoint descriptor.
+ */
+struct usb_ep *usb_ep_autoconfig(
+	struct usb_gadget		*gadget,
+	struct usb_endpoint_descriptor	*desc
+)
+{
+	return usb_ep_autoconfig_ss(gadget, desc, NULL);
+}
+
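As a usage sketch (illustrative only, not part of this file), a function driver's bind() routine typically calls usb_ep_autoconfig() with a partially filled descriptor and then claims the endpoint it gets back, exactly as the comment above describes. The descriptor and function names here are hypothetical.

/* Illustrative sketch only: autoconfigure and claim a bulk IN endpoint. */
static struct usb_endpoint_descriptor my_bulk_in_desc = {
	.bLength          = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType  = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes     = USB_ENDPOINT_XFER_BULK,
	/* wMaxPacketSize left at 0: autoconfig fills in the limit */
};

static int my_function_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_ep *ep;

	ep = usb_ep_autoconfig(c->cdev->gadget, &my_bulk_in_desc);
	if (!ep)
		return -ENODEV;

	/* claim the endpoint so later autoconfig calls skip it */
	ep->driver_data = f;
	return 0;
}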
+
+/**
+ * usb_ep_autoconfig_reset - reset endpoint autoconfig state
+ * @gadget: device for which autoconfig state will be reset
+ *
+ * Use this for devices where one configuration may need to assign
+ * endpoint resources very differently from the next one.  It clears
+ * state such as ep->driver_data and the record of assigned endpoints
+ * used by usb_ep_autoconfig().
+ */
+void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
+{
+	struct usb_ep	*ep;
+
+	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+		ep->driver_data = NULL;
+	}
+#ifdef	MANY_ENDPOINTS
+	in_epnum = 0;
+#endif
+	epnum = 0;
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ether.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ether.c
new file mode 100644
index 0000000..a28f6ff
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ether.c
@@ -0,0 +1,413 @@
+/*
+ * ether.c -- Ethernet gadget driver, with CDC and non-CDC options
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+
+
+#if defined USB_ETH_RNDIS
+#  undef USB_ETH_RNDIS
+#endif
+#ifdef CONFIG_USB_ETH_RNDIS
+#  define USB_ETH_RNDIS y
+#endif
+
+#include "u_ether.h"
+
+
+/*
+ * Ethernet gadget driver -- with CDC and non-CDC options
+ * Builds on hardware support for a full duplex link.
+ *
+ * CDC Ethernet is the standard USB solution for sending Ethernet frames
+ * using USB.  Real hardware tends to use the same framing protocol but look
+ * different for control features.  This driver strongly prefers to use
+ * this USB-IF standard as its open-systems interoperability solution;
+ * most host side USB stacks (except from Microsoft) support it.
+ *
+ * This is sometimes called "CDC ECM" (Ethernet Control Model) to support
+ * TLA-soup.  "CDC ACM" (Abstract Control Model) is for modems, and a new
+ * "CDC EEM" (Ethernet Emulation Model) is starting to spread.
+ *
+ * There's some hardware that can't talk CDC ECM.  We make that hardware
+ * implement a "minimalist" vendor-agnostic CDC core:  same framing, but
+ * link-level setup only requires activating the configuration.  Only the
+ * endpoint descriptors, and product/vendor IDs, are relevant; no control
+ * operations are available.  Linux supports it, but other host operating
+ * systems may not.  (This is a subset of CDC Ethernet.)
+ *
+ * It turns out that if you add a few descriptors to that "CDC Subset",
+ * (Windows) host side drivers from MCCI can treat it as one submode of
+ * a proprietary scheme called "SAFE" ... without needing to know about
+ * specific product/vendor IDs.  So we do that, making it easier to use
+ * those MS-Windows drivers.  Those added descriptors make it resemble a
+ * CDC MDLM device, but they don't change device behavior at all.  (See
+ * MCCI Engineering report 950198 "SAFE Networking Functions".)
+ *
+ * A third option is also in use.  Rather than CDC Ethernet, or something
+ * simpler, Microsoft pushes their own approach: RNDIS.  The published
+ * RNDIS specs are ambiguous and appear to be incomplete, and are also
+ * needlessly complex.  They borrow more from CDC ACM than CDC ECM.
+ */
+
+#define DRIVER_DESC		"Ethernet Gadget"
+#define DRIVER_VERSION		"Memorial Day 2008"
+
+#ifdef USB_ETH_RNDIS
+#define PREFIX			"RNDIS/"
+#else
+#define PREFIX			""
+#endif
+
+/*
+ * This driver aims for interoperability by using CDC ECM unless
+ *
+ *		can_support_ecm()
+ *
+ * returns false, in which case it supports the CDC Subset.  By default,
+ * that returns true; most hardware has no problems with CDC ECM, that's
+ * a good default.  Previous versions of this driver had no default; this
+ * version changes that, removing overhead for new controller support.
+ *
+ *	IF YOUR HARDWARE CAN'T SUPPORT CDC ECM, UPDATE THAT ROUTINE!
+ */
+
+static inline bool has_rndis(void)
+{
+#ifdef	USB_ETH_RNDIS
+	return true;
+#else
+	return false;
+#endif
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "f_ecm.c"
+#include "f_subset.c"
+#ifdef	USB_ETH_RNDIS
+#include "f_rndis.c"
+#include "rndis.c"
+#endif
+#include "f_eem.c"
+#include "u_ether.c"
+
+/*-------------------------------------------------------------------------*/
+
+/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+
+/* Thanks to NetChip Technologies for donating this product ID.
+ * It's for devices with only CDC Ethernet configurations.
+ */
+#define CDC_VENDOR_NUM		0x0525	/* NetChip */
+#define CDC_PRODUCT_NUM		0xa4a1	/* Linux-USB Ethernet Gadget */
+
+/* For hardware that can't talk CDC, we use the same vendor ID that
+ * ARM Linux has used for ethernet-over-usb, both with sa1100 and
+ * with pxa250.  We're protocol-compatible, if the host-side drivers
+ * use the endpoint descriptors.  bcdDevice (version) is nonzero, so
+ * drivers that need to hard-wire endpoint numbers have a hook.
+ *
+ * The protocol is a minimal subset of CDC Ether, which works on any bulk
+ * hardware that's not deeply broken ... even on hardware that can't talk
+ * RNDIS (like SA-1100, with no interrupt endpoint, or anything that
+ * doesn't handle control-OUT).
+ */
+#define	SIMPLE_VENDOR_NUM	0x049f
+#define	SIMPLE_PRODUCT_NUM	0x505a
+
+/* For hardware that can talk RNDIS and either of the above protocols,
+ * use this ID ... the windows INF files will know it.  Unless it's
+ * used with CDC Ethernet, Linux 2.4 hosts will need updates to choose
+ * the non-RNDIS configuration.
+ */
+#define RNDIS_VENDOR_NUM	0x0525	/* NetChip */
+#define RNDIS_PRODUCT_NUM	0xa4a2	/* Ethernet/RNDIS Gadget */
+
+/* For EEM gadgets */
+#define EEM_VENDOR_NUM		0x1d6b	/* Linux Foundation */
+#define EEM_PRODUCT_NUM		0x0102	/* EEM Gadget */
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		cpu_to_le16 (0x0200),
+
+	.bDeviceClass =		USB_CLASS_COMM,
+	.bDeviceSubClass =	0,
+	.bDeviceProtocol =	0,
+	/* .bMaxPacketSize0 = f(hardware) */
+
+	/* Vendor and product id defaults change according to what configs
+	 * we support.  (As does bNumConfigurations.)  These values can
+	 * also be overridden by module parameters.
+	 */
+	.idVendor =		cpu_to_le16 (CDC_VENDOR_NUM),
+	.idProduct =		cpu_to_le16 (CDC_PRODUCT_NUM),
+	/* .bcdDevice = f(hardware) */
+	/* .iManufacturer = DYNAMIC */
+	/* .iProduct = DYNAMIC */
+	/* NO SERIAL NUMBER */
+	.bNumConfigurations =	1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/* REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer,
+	[STRING_PRODUCT_IDX].s = PREFIX DRIVER_DESC,
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+static u8 hostaddr[ETH_ALEN];
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * We may not have an RNDIS configuration, but if we do it needs to be
+ * the first one present.  That's to make Microsoft's drivers happy,
+ * and to follow DOCSIS 1.0 (cable modem standard).
+ */
+static int __init rndis_do_config(struct usb_configuration *c)
+{
+	/* FIXME alloc iConfiguration string, set it in c->strings */
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	return rndis_bind_config(c, hostaddr);
+}
+
+static struct usb_configuration rndis_config_driver = {
+	.label			= "RNDIS",
+	.bConfigurationValue	= 2,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_ETH_EEM
+static bool use_eem = 1;
+#else
+static bool use_eem;
+#endif
+module_param(use_eem, bool, 0);
+MODULE_PARM_DESC(use_eem, "use CDC EEM mode");
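Usage note (an illustrative example, assuming the module is built as g_ether as named below): since the parameter is read at load time, the framing can be selected with "modprobe g_ether use_eem=1", which forces CDC EEM even when CONFIG_USB_ETH_EEM is not set; with use_eem left at 0 the driver falls back to CDC ECM or the CDC Subset, as decided in eth_do_config() below.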
+
+/*
+ * We _always_ have an ECM, CDC Subset, or EEM configuration.
+ */
+static int __init eth_do_config(struct usb_configuration *c)
+{
+	/* FIXME alloc iConfiguration string, set it in c->strings */
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	if (use_eem)
+		return eem_bind_config(c);
+	else if (can_support_ecm(c->cdev->gadget))
+		return ecm_bind_config(c, hostaddr);
+	else
+		return geth_bind_config(c, hostaddr);
+}
+
+static struct usb_configuration eth_config_driver = {
+	/* .label = f(hardware) */
+	.bConfigurationValue	= 1,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init eth_bind(struct usb_composite_dev *cdev)
+{
+	int			gcnum;
+	struct usb_gadget	*gadget = cdev->gadget;
+	int			status;
+
+	/* set up network link layer */
+	status = gether_setup(cdev->gadget, hostaddr);
+	if (status < 0)
+		return status;
+
+	/* set up main config label and device descriptor */
+	if (use_eem) {
+		/* EEM */
+		eth_config_driver.label = "CDC Ethernet (EEM)";
+		device_desc.idVendor = cpu_to_le16(EEM_VENDOR_NUM);
+		device_desc.idProduct = cpu_to_le16(EEM_PRODUCT_NUM);
+	} else if (can_support_ecm(cdev->gadget)) {
+		/* ECM */
+		eth_config_driver.label = "CDC Ethernet (ECM)";
+	} else {
+		/* CDC Subset */
+		eth_config_driver.label = "CDC Subset/SAFE";
+
+		device_desc.idVendor = cpu_to_le16(SIMPLE_VENDOR_NUM);
+		device_desc.idProduct = cpu_to_le16(SIMPLE_PRODUCT_NUM);
+		if (!has_rndis())
+			device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC;
+	}
+
+	if (has_rndis()) {
+		/* RNDIS plus ECM-or-Subset */
+		device_desc.idVendor = cpu_to_le16(RNDIS_VENDOR_NUM);
+		device_desc.idProduct = cpu_to_le16(RNDIS_PRODUCT_NUM);
+		device_desc.bNumConfigurations = 2;
+	}
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+	else {
+		/* We assume that can_support_ecm() tells the truth;
+		 * but if the controller isn't recognized at all then
+		 * that assumption is a bit more likely to be wrong.
+		 */
+		dev_warn(&gadget->dev,
+				"controller '%s' not recognized; trying %s\n",
+				gadget->name,
+				eth_config_driver.label);
+		device_desc.bcdDevice =
+			cpu_to_le16(0x0300 | 0x0099);
+	}
+
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+
+	/* device descriptor strings: manufacturer, product */
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+		init_utsname()->sysname, init_utsname()->release,
+		gadget->name);
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	device_desc.iProduct = status;
+
+	/* register our configuration(s); RNDIS first, if it's used */
+	if (has_rndis()) {
+		status = usb_add_config(cdev, &rndis_config_driver,
+				rndis_do_config);
+		if (status < 0)
+			goto fail;
+	}
+
+	status = usb_add_config(cdev, &eth_config_driver, eth_do_config);
+	if (status < 0)
+		goto fail;
+
+	dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
+			DRIVER_DESC);
+
+	return 0;
+
+fail:
+	gether_cleanup();
+	return status;
+}
+
+static int __exit eth_unbind(struct usb_composite_dev *cdev)
+{
+	gether_cleanup();
+	return 0;
+}
+
+static struct usb_composite_driver eth_driver = {
+	.name		= "g_ether",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.max_speed	= USB_SPEED_SUPER,
+	.unbind		= __exit_p(eth_unbind),
+};
+
+MODULE_DESCRIPTION(PREFIX DRIVER_DESC);
+MODULE_AUTHOR("David Brownell, Benedikt Spanger");
+MODULE_LICENSE("GPL");
+
+static int __init init(void)
+{
+	return usb_composite_probe(&eth_driver, eth_bind);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&eth_driver);
+}
+module_exit(cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_accessory.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_accessory.c
new file mode 100644
index 0000000..a244265
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_accessory.c
@@ -0,0 +1,1180 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/hid.h>
+#include <linux/hiddev.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_accessory.h>
+
+#define BULK_BUFFER_SIZE    16384
+#define ACC_STRING_SIZE     256
+
+#define PROTOCOL_VERSION    2
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX	0
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+
+struct acc_hid_dev {
+	struct list_head	list;
+	struct hid_device *hid;
+	struct acc_dev *dev;
+	/* accessory defined ID */
+	int id;
+	/* HID report descriptor */
+	u8 *report_desc;
+	/* length of HID report descriptor */
+	int report_desc_len;
+	/* number of bytes of report_desc we have received so far */
+	int report_desc_offset;
+};
+
+struct acc_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+
+	/* set to 1 when we connect */
+	int online:1;
+	/* Set to 1 when we disconnect.
+	 * Not cleared until our file is closed.
+	 */
+	int disconnected:1;
+
+	/* strings sent by the host */
+	char manufacturer[ACC_STRING_SIZE];
+	char model[ACC_STRING_SIZE];
+	char description[ACC_STRING_SIZE];
+	char version[ACC_STRING_SIZE];
+	char uri[ACC_STRING_SIZE];
+	char serial[ACC_STRING_SIZE];
+
+	/* for acc_complete_set_string */
+	int string_index;
+
+	/* set to 1 if we have a pending start request */
+	int start_requested;
+
+	int audio_mode;
+
+	/* synchronize access to our device file */
+	atomic_t open_excl;
+
+	struct list_head tx_idle;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	struct usb_request *rx_req[RX_REQ_MAX];
+	int rx_done;
+
+	/* delayed work for handling ACCESSORY_START */
+	struct delayed_work start_work;
+
+	/* worker for registering and unregistering hid devices */
+	struct work_struct hid_work;
+
+	/* list of active HID devices */
+	struct list_head	hid_list;
+
+	/* list of new HID devices to register */
+	struct list_head	new_hid_list;
+
+	/* list of dead HID devices to unregister */
+	struct list_head	dead_hid_list;
+};
+
+static struct usb_interface_descriptor acc_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 2,
+	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol     = 0,
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_fullspeed_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_highspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_highspeed_out_desc,
+	NULL,
+};
+
+static struct usb_string acc_string_defs[] = {
+	[INTERFACE_STRING_INDEX].s	= "Android Accessory Interface",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings acc_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= acc_string_defs,
+};
+
+static struct usb_gadget_strings *acc_strings[] = {
+	&acc_string_table,
+	NULL,
+};
+
+/* temporary variable used between acc_open() and acc_gadget_bind() */
+static struct acc_dev *_acc_dev;
+
+static inline struct acc_dev *func_to_dev(struct usb_function *f)
+{
+	return container_of(f, struct acc_dev, function);
+}
+
+static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void acc_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/* add a request to the tail of a list */
+static void req_put(struct acc_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void acc_set_disconnected(struct acc_dev *dev)
+{
+	dev->online = 0;
+	dev->disconnected = 1;
+}
+
+static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	if (req->status != 0)
+		acc_set_disconnected(dev);
+
+	req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void acc_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	dev->rx_done = 1;
+	if (req->status != 0)
+		acc_set_disconnected(dev);
+
+	wake_up(&dev->read_wq);
+}
+
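+/* completion handler for the data stage of an ACCESSORY_SEND_STRING request */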
+static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev	*dev = ep->driver_data;
+	char *string_dest = NULL;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_set_string, err %d\n", req->status);
+		return;
+	}
+
+	switch (dev->string_index) {
+	case ACCESSORY_STRING_MANUFACTURER:
+		string_dest = dev->manufacturer;
+		break;
+	case ACCESSORY_STRING_MODEL:
+		string_dest = dev->model;
+		break;
+	case ACCESSORY_STRING_DESCRIPTION:
+		string_dest = dev->description;
+		break;
+	case ACCESSORY_STRING_VERSION:
+		string_dest = dev->version;
+		break;
+	case ACCESSORY_STRING_URI:
+		string_dest = dev->uri;
+		break;
+	case ACCESSORY_STRING_SERIAL:
+		string_dest = dev->serial;
+		break;
+	}
+	if (string_dest) {
+		unsigned long flags;
+
+		if (length >= ACC_STRING_SIZE)
+			length = ACC_STRING_SIZE - 1;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		memcpy(string_dest, req->buf, length);
+		/* ensure zero termination */
+		string_dest[length] = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
+	} else {
+		pr_err("unknown accessory string index %d\n",
+			dev->string_index);
+	}
+}
+
+static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct acc_hid_dev *hid = req->context;
+	struct acc_dev *dev = hid->dev;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_set_hid_report_desc, err %d\n",
+			req->status);
+		return;
+	}
+
+	memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length);
+	hid->report_desc_offset += length;
+	if (hid->report_desc_offset == hid->report_desc_len) {
+		/* After we have received the entire report descriptor
+		 * we schedule work to initialize the HID device
+		 */
+		schedule_work(&dev->hid_work);
+	}
+}
+
+static void acc_complete_send_hid_event(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct acc_hid_dev *hid = req->context;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_send_hid_event, err %d\n", req->status);
+		return;
+	}
+
+	hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1);
+}
+
+static int acc_hid_parse(struct hid_device *hid)
+{
+	struct acc_hid_dev *hdev = hid->driver_data;
+
+	hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len);
+	return 0;
+}
+
+static int acc_hid_start(struct hid_device *hid)
+{
+	return 0;
+}
+
+static void acc_hid_stop(struct hid_device *hid)
+{
+}
+
+static int acc_hid_open(struct hid_device *hid)
+{
+	return 0;
+}
+
+static void acc_hid_close(struct hid_device *hid)
+{
+}
+
+static struct hid_ll_driver acc_hid_ll_driver = {
+	.parse = acc_hid_parse,
+	.start = acc_hid_start,
+	.stop = acc_hid_stop,
+	.open = acc_hid_open,
+	.close = acc_hid_close,
+};
+
+static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev,
+		int id, int desc_len)
+{
+	struct acc_hid_dev *hdev;
+
+	hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC);
+	if (!hdev)
+		return NULL;
+	hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC);
+	if (!hdev->report_desc) {
+		kfree(hdev);
+		return NULL;
+	}
+	hdev->dev = dev;
+	hdev->id = id;
+	hdev->report_desc_len = desc_len;
+
+	return hdev;
+}
+
+static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id)
+{
+	struct acc_hid_dev *hid;
+
+	list_for_each_entry(hid, list, list) {
+		if (hid->id == id)
+			return hid;
+	}
+	return NULL;
+}
+
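+/*
+ * HID devices pass through three lists: new_hid_list while their report
+ * descriptor is still being received, hid_list once they are registered,
+ * and dead_hid_list until acc_hid_work() destroys them.
+ */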
+static int acc_register_hid(struct acc_dev *dev, int id, int desc_length)
+{
+	struct acc_hid_dev *hid;
+	unsigned long flags;
+
+	/* report descriptor length must be > 0 */
+	if (desc_length <= 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	/* replace HID if one already exists with this ID */
+	hid = acc_hid_get(&dev->hid_list, id);
+	if (!hid)
+		hid = acc_hid_get(&dev->new_hid_list, id);
+	if (hid)
+		list_move(&hid->list, &dev->dead_hid_list);
+
+	hid = acc_hid_new(dev, id, desc_length);
+	if (!hid) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -ENOMEM;
+	}
+
+	list_add(&hid->list, &dev->new_hid_list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* schedule work to register the HID device */
+	schedule_work(&dev->hid_work);
+	return 0;
+}
+
+static int acc_unregister_hid(struct acc_dev *dev, int id)
+{
+	struct acc_hid_dev *hid;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	hid = acc_hid_get(&dev->hid_list, id);
+	if (!hid)
+		hid = acc_hid_get(&dev->new_hid_list, id);
+	if (!hid) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -EINVAL;
+	}
+
+	list_move(&hid->list, &dev->dead_hid_list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	schedule_work(&dev->hid_work);
+	return 0;
+}
+
+static int create_bulk_endpoints(struct acc_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_in;
+		req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_out;
+		dev->rx_req[i] = req;
+	}
+
+	return 0;
+
+fail:
+	pr_err("acc_bind() could not allocate requests\n");
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+	return -ENOMEM;
+}
+
+static ssize_t acc_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret = 0;
+
+	pr_debug("acc_read(%d)\n", count);
+
+	if (dev->disconnected)
+		return -ENODEV;
+
+	if (count > BULK_BUFFER_SIZE)
+		count = BULK_BUFFER_SIZE;
+
+	/* we will block until we're online */
+	pr_debug("acc_read: waiting for online\n");
+	ret = wait_event_interruptible(dev->read_wq, dev->online);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = count;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	} else {
+		pr_debug("rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		r = ret;
+		usb_ep_dequeue(dev->ep_out, req);
+		goto done;
+	}
+	if (dev->online) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		pr_debug("rx %p %d\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else
+		r = -EIO;
+
+done:
+	pr_debug("acc_read returning %d\n", r);
+	return r;
+}
+
+static ssize_t acc_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req = 0;
+	int r = count, xfer;
+	int ret;
+
+	pr_debug("acc_write(%d)\n", count);
+
+	if (!dev->online || dev->disconnected)
+		return -ENODEV;
+
+	while (count > 0) {
+		if (!dev->online) {
+			pr_debug("acc_write dev->error\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > BULK_BUFFER_SIZE)
+			xfer = BULK_BUFFER_SIZE;
+		else
+			xfer = count;
+		if (copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			pr_debug("acc_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = 0;
+	}
+
+	if (req)
+		req_put(dev, &dev->tx_idle, req);
+
+	pr_debug("acc_write returning %d\n", r);
+	return r;
+}
+
+static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct acc_dev *dev = fp->private_data;
+	char *src = NULL;
+	int ret;
+
+	switch (code) {
+	case ACCESSORY_GET_STRING_MANUFACTURER:
+		src = dev->manufacturer;
+		break;
+	case ACCESSORY_GET_STRING_MODEL:
+		src = dev->model;
+		break;
+	case ACCESSORY_GET_STRING_DESCRIPTION:
+		src = dev->description;
+		break;
+	case ACCESSORY_GET_STRING_VERSION:
+		src = dev->version;
+		break;
+	case ACCESSORY_GET_STRING_URI:
+		src = dev->uri;
+		break;
+	case ACCESSORY_GET_STRING_SERIAL:
+		src = dev->serial;
+		break;
+	case ACCESSORY_IS_START_REQUESTED:
+		return dev->start_requested;
+	case ACCESSORY_GET_AUDIO_MODE:
+		return dev->audio_mode;
+	}
+	if (!src)
+		return -EINVAL;
+
+	ret = strlen(src) + 1;
+	if (copy_to_user((void __user *)value, src, ret))
+		ret = -EFAULT;
+	return ret;
+}
+
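+/* only one process may hold /dev/usb_accessory open at a time (open_excl) */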
+static int acc_open(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_open\n");
+	if (atomic_xchg(&_acc_dev->open_excl, 1))
+		return -EBUSY;
+
+	_acc_dev->disconnected = 0;
+	fp->private_data = _acc_dev;
+	return 0;
+}
+
+static int acc_release(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_release\n");
+
+	WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
+	_acc_dev->disconnected = 0;
+	return 0;
+}
+
+/* file operations for /dev/usb_accessory */
+static const struct file_operations acc_fops = {
+	.owner = THIS_MODULE,
+	.read = acc_read,
+	.write = acc_write,
+	.unlocked_ioctl = acc_ioctl,
+	.open = acc_open,
+	.release = acc_release,
+};
+
+static int acc_hid_probe(struct hid_device *hdev,
+		const struct hid_device_id *id)
+{
+	int ret;
+
+	ret = hid_parse(hdev);
+	if (ret)
+		return ret;
+	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static struct miscdevice acc_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "usb_accessory",
+	.fops = &acc_fops,
+};
+
+static const struct hid_device_id acc_hid_table[] = {
+	{ HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+	{ }
+};
+
+static struct hid_driver acc_hid_driver = {
+	.name = "USB accessory",
+	.id_table = acc_hid_table,
+	.probe = acc_hid_probe,
+};
+
+static int acc_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct acc_dev	*dev = _acc_dev;
+	int	value = -EOPNOTSUPP;
+	struct acc_hid_dev *hid;
+	int offset;
+	u8 b_requestType = ctrl->bRequestType;
+	u8 b_request = ctrl->bRequest;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+	unsigned long flags;
+
+/*
+	printk(KERN_INFO "acc_ctrlrequest "
+			"%02x.%02x v%04x i%04x l%u\n",
+			b_requestType, b_request,
+			w_value, w_index, w_length);
+*/
+
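+	/*
+	 * Android Open Accessory control traffic: vendor OUT requests carry
+	 * the identifying strings, audio mode and HID data sent by the host;
+	 * the vendor IN request below returns the accessory protocol version.
+	 */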
+	if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_START) {
+			dev->start_requested = 1;
+			schedule_delayed_work(
+				&dev->start_work, msecs_to_jiffies(10));
+			value = 0;
+		} else if (b_request == ACCESSORY_SEND_STRING) {
+			dev->string_index = w_index;
+			cdev->gadget->ep0->driver_data = dev;
+			cdev->req->complete = acc_complete_set_string;
+			value = w_length;
+		} else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
+				w_index == 0 && w_length == 0) {
+			dev->audio_mode = w_value;
+			value = 0;
+		} else if (b_request == ACCESSORY_REGISTER_HID) {
+			value = acc_register_hid(dev, w_value, w_index);
+		} else if (b_request == ACCESSORY_UNREGISTER_HID) {
+			value = acc_unregister_hid(dev, w_value);
+		} else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
+			spin_lock_irqsave(&dev->lock, flags);
+			hid = acc_hid_get(&dev->new_hid_list, w_value);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!hid) {
+				value = -EINVAL;
+				goto err;
+			}
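+			/*
+			 * Large report descriptors arrive in several
+			 * ACCESSORY_SET_HID_REPORT_DESC requests; wIndex is
+			 * the byte offset of the current fragment.
+			 */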
+			offset = w_index;
+			if (offset != hid->report_desc_offset
+				|| offset + w_length > hid->report_desc_len) {
+				value = -EINVAL;
+				goto err;
+			}
+			cdev->req->context = hid;
+			cdev->req->complete = acc_complete_set_hid_report_desc;
+			value = w_length;
+		} else if (b_request == ACCESSORY_SEND_HID_EVENT) {
+			spin_lock_irqsave(&dev->lock, flags);
+			hid = acc_hid_get(&dev->hid_list, w_value);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!hid) {
+				value = -EINVAL;
+				goto err;
+			}
+			cdev->req->context = hid;
+			cdev->req->complete = acc_complete_send_hid_event;
+			value = w_length;
+		}
+	} else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_GET_PROTOCOL) {
+			*((u16 *)cdev->req->buf) = PROTOCOL_VERSION;
+			value = sizeof(u16);
+
+			/* clear any string left over from a previous session */
+			memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
+			memset(dev->model, 0, sizeof(dev->model));
+			memset(dev->description, 0, sizeof(dev->description));
+			memset(dev->version, 0, sizeof(dev->version));
+			memset(dev->uri, 0, sizeof(dev->uri));
+			memset(dev->serial, 0, sizeof(dev->serial));
+			dev->start_requested = 0;
+			dev->audio_mode = 0;
+		}
+	}
+
+	if (value >= 0) {
+		cdev->req->zero = 0;
+		cdev->req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "%s setup response queue error\n",
+				__func__);
+	}
+
+err:
+	if (value == -EOPNOTSUPP)
+		VDBG(cdev,
+			"unknown class-specific control req "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	return value;
+}
+
+static int
+acc_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct acc_dev	*dev = func_to_dev(f);
+	int			id;
+	int			ret;
+
+	DBG(cdev, "acc_function_bind dev: %p\n", dev);
+
+	ret = hid_register_driver(&acc_hid_driver);
+	if (ret)
+		return ret;
+
+	dev->start_requested = 0;
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	acc_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc,
+			&acc_fullspeed_out_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		acc_highspeed_in_desc.bEndpointAddress =
+			acc_fullspeed_in_desc.bEndpointAddress;
+		acc_highspeed_out_desc.bEndpointAddress =
+			acc_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static void
+kill_all_hid_devices(struct acc_dev *dev)
+{
+	struct acc_hid_dev *hid;
+	struct list_head *entry, *temp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_for_each_safe(entry, temp, &dev->hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		list_add(&hid->list, &dev->dead_hid_list);
+	}
+	list_for_each_safe(entry, temp, &dev->new_hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		list_add(&hid->list, &dev->dead_hid_list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	schedule_work(&dev->hid_work);
+}
+
+static void
+acc_hid_unbind(struct acc_dev *dev)
+{
+	hid_unregister_driver(&acc_hid_driver);
+	kill_all_hid_devices(dev);
+}
+
+static void
+acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_request *req;
+	int i;
+
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+
+	acc_hid_unbind(dev);
+}
+
+static void acc_start_work(struct work_struct *data)
+{
+	char *envp[2] = { "ACCESSORY=START", NULL };
+	kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static int acc_hid_init(struct acc_hid_dev *hdev)
+{
+	struct hid_device *hid;
+	int ret;
+
+	hid = hid_allocate_device();
+	if (IS_ERR(hid))
+		return PTR_ERR(hid);
+
+	hid->ll_driver = &acc_hid_ll_driver;
+	hid->dev.parent = acc_device.this_device;
+
+	hid->bus = BUS_USB;
+	hid->vendor = HID_ANY_ID;
+	hid->product = HID_ANY_ID;
+	hid->driver_data = hdev;
+	ret = hid_add_device(hid);
+	if (ret) {
+		pr_err("can't add hid device: %d\n", ret);
+		hid_destroy_device(hid);
+		return ret;
+	}
+
+	hdev->hid = hid;
+	return 0;
+}
+
+static void acc_hid_delete(struct acc_hid_dev *hid)
+{
+	kfree(hid->report_desc);
+	kfree(hid);
+}
+
+static void acc_hid_work(struct work_struct *data)
+{
+	struct acc_dev *dev = _acc_dev;
+	struct list_head	*entry, *temp;
+	struct acc_hid_dev *hid;
+	struct list_head	new_list, dead_list;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&new_list);
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* copy hids that are ready for initialization to new_list */
+	list_for_each_safe(entry, temp, &dev->new_hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		if (hid->report_desc_offset == hid->report_desc_len)
+			list_move(&hid->list, &new_list);
+	}
+
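+	/*
+	 * Splice dev->dead_hid_list onto the local dead_list (open-coded
+	 * list_replace_init()) so the dead devices can be destroyed after
+	 * the spinlock is dropped.
+	 */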
+	if (list_empty(&dev->dead_hid_list)) {
+		INIT_LIST_HEAD(&dead_list);
+	} else {
+		/* move all of dev->dead_hid_list to dead_list */
+		dead_list.prev = dev->dead_hid_list.prev;
+		dead_list.next = dev->dead_hid_list.next;
+		dead_list.next->prev = &dead_list;
+		dead_list.prev->next = &dead_list;
+		INIT_LIST_HEAD(&dev->dead_hid_list);
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* register new HID devices */
+	list_for_each_safe(entry, temp, &new_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		if (acc_hid_init(hid)) {
+			pr_err("can't add HID device %p\n", hid);
+			acc_hid_delete(hid);
+		} else {
+			spin_lock_irqsave(&dev->lock, flags);
+			list_move(&hid->list, &dev->hid_list);
+			spin_unlock_irqrestore(&dev->lock, flags);
+		}
+	}
+
+	/* remove dead HID devices */
+	list_for_each_safe(entry, temp, &dead_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		if (hid->hid)
+			hid_destroy_device(hid->hid);
+		acc_hid_delete(hid);
+	}
+}
+
+static int acc_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_out);
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	dev->online = 1;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void acc_function_disable(struct usb_function *f)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	DBG(cdev, "acc_function_disable\n");
+	acc_set_disconnected(dev);
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int acc_bind_config(struct usb_configuration *c)
+{
+	struct acc_dev *dev = _acc_dev;
+	int ret;
+
+	printk(KERN_INFO "acc_bind_config\n");
+
+	/* allocate a string ID for our interface */
+	if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+		ret = usb_string_id(c->cdev);
+		if (ret < 0)
+			return ret;
+		acc_string_defs[INTERFACE_STRING_INDEX].id = ret;
+		acc_interface_desc.iInterface = ret;
+	}
+
+	dev->cdev = c->cdev;
+	dev->function.name = "accessory";
+	dev->function.strings = acc_strings;
+	dev->function.descriptors = fs_acc_descs;
+	dev->function.hs_descriptors = hs_acc_descs;
+	dev->function.bind = acc_function_bind;
+	dev->function.unbind = acc_function_unbind;
+	dev->function.set_alt = acc_function_set_alt;
+	dev->function.disable = acc_function_disable;
+
+	return usb_add_function(c, &dev->function);
+}
+
+static int acc_setup(void)
+{
+	struct acc_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+	atomic_set(&dev->open_excl, 0);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->hid_list);
+	INIT_LIST_HEAD(&dev->new_hid_list);
+	INIT_LIST_HEAD(&dev->dead_hid_list);
+	INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
+	INIT_WORK(&dev->hid_work, acc_hid_work);
+
+	/* _acc_dev must be set before calling usb_gadget_register_driver */
+	_acc_dev = dev;
+
+	ret = misc_register(&acc_device);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(dev);
+	pr_err("USB accessory gadget driver failed to initialize\n");
+	return ret;
+}
+
+static void acc_disconnect(void)
+{
+	/* unregister all HID devices if USB is disconnected */
+	kill_all_hid_devices(_acc_dev);
+}
+
+static void acc_cleanup(void)
+{
+	misc_deregister(&acc_device);
+	kfree(_acc_dev);
+	_acc_dev = NULL;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_acm.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_acm.c
new file mode 100755
index 0000000..4bcb751
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_acm.c
@@ -0,0 +1,838 @@
+/*
+ * f_acm.c -- USB CDC serial (ACM) function driver
+ *
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 by David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ * Copyright (C) 2009 by Samsung Electronics
+ * Author: Michal Nazarewicz (mina86@mina86.com)
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+#include "u_serial.h"
+#include "gadget_chips.h"
+
+
+/*
+ * This CDC ACM function support just wraps control functions and
+ * notifications around the generic serial-over-usb code.
+ *
+ * Because CDC ACM is standardized by the USB-IF, many host operating
+ * systems have drivers for it.  Accordingly, ACM is the preferred
+ * interop solution for serial-port type connections.  The control
+ * models are often not necessary, and in any case don't do much in
+ * this bare-bones implementation.
+ *
+ * Note that even MS-Windows has some support for ACM.  However, that
+ * support is somewhat broken because when you use ACM in a composite
+ * device, having multiple interfaces confuses the poor OS.  It doesn't
+ * seem to understand CDC Union descriptors.  The new "association"
+ * descriptors (roughly equivalent to CDC Unions) may sometimes help.
+ */
+
+struct f_acm {
+	struct gserial			port;
+	u8				ctrl_id, data_id;
+	u8				port_num;
+
+	u8				pending;
+
+	/* lock is mostly for pending and notify_req ... they get accessed
+	 * by callbacks both from tty (open/close/break) under its spinlock,
+	 * and notify_req.complete() which can't use that lock.
+	 */
+	spinlock_t			lock;
+
+	struct usb_ep			*notify;
+	struct usb_request		*notify_req;
+
+	struct usb_cdc_line_coding	port_line_coding;	/* 8-N-1 etc */
+
+	/* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */
+	u16				port_handshake_bits;
+#define ACM_CTRL_RTS	(1 << 1)	/* unused with full duplex */
+#define ACM_CTRL_DTR	(1 << 0)	/* host is ready for data r/w */
+
+	/* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */
+	u16				serial_state;
+#define ACM_CTRL_OVERRUN	(1 << 6)
+#define ACM_CTRL_PARITY		(1 << 5)
+#define ACM_CTRL_FRAMING	(1 << 4)
+#define ACM_CTRL_RI		(1 << 3)
+#define ACM_CTRL_BRK		(1 << 2)
+#define ACM_CTRL_DSR		(1 << 1)
+#define ACM_CTRL_DCD		(1 << 0)
+};
+
+static inline struct f_acm *func_to_acm(struct usb_function *f)
+{
+	return container_of(f, struct f_acm, port.func);
+}
+
+static inline struct f_acm *port_to_acm(struct gserial *p)
+{
+	return container_of(p, struct f_acm, port);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* notification endpoint uses smallish and infrequent fixed-size messages */
+
+#define GS_LOG2_NOTIFY_INTERVAL		5	/* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET		10	/* notification + 2 bytes */
+
+/* interface and class descriptors: */
+
+static struct usb_interface_assoc_descriptor
+acm_iad_descriptor = {
+	.bLength =		sizeof acm_iad_descriptor,
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+
+	/* .bFirstInterface =	DYNAMIC, */
+	.bInterfaceCount = 	2,	// control + data
+	.bFunctionClass =	USB_CLASS_COMM,
+	.bFunctionSubClass =	USB_CDC_SUBCLASS_ACM,
+	.bFunctionProtocol =	USB_CDC_ACM_PROTO_AT_V25TER,
+	/* .iFunction =		DYNAMIC */
+};
+
+
+static struct usb_interface_descriptor acm_control_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ACM,
+	.bInterfaceProtocol =	USB_CDC_ACM_PROTO_AT_V25TER,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_interface_descriptor acm_data_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc acm_header_desc = {
+	.bLength =		sizeof(acm_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor
+acm_call_mgmt_descriptor = {
+	.bLength =		sizeof(acm_call_mgmt_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
+	.bmCapabilities =	0,
+	/* .bDataInterface = DYNAMIC */
+};
+
+static struct usb_cdc_acm_descriptor acm_descriptor = {
+	.bLength =		sizeof(acm_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
+	.bmCapabilities =	USB_CDC_CAP_LINE,
+};
+
+static struct usb_cdc_union_desc acm_union_desc = {
+	.bLength =		sizeof(acm_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor acm_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval =		1 << GS_LOG2_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor acm_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acm_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *acm_fs_function[] = {
+	(struct usb_descriptor_header *) &acm_iad_descriptor,
+	(struct usb_descriptor_header *) &acm_control_interface_desc,
+	(struct usb_descriptor_header *) &acm_header_desc,
+	(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &acm_descriptor,
+	(struct usb_descriptor_header *) &acm_union_desc,
+	(struct usb_descriptor_header *) &acm_fs_notify_desc,
+	(struct usb_descriptor_header *) &acm_data_interface_desc,
+	(struct usb_descriptor_header *) &acm_fs_in_desc,
+	(struct usb_descriptor_header *) &acm_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor acm_hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval =		GS_LOG2_NOTIFY_INTERVAL+4,
+};
+
+static struct usb_endpoint_descriptor acm_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acm_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *acm_hs_function[] = {
+	(struct usb_descriptor_header *) &acm_iad_descriptor,
+	(struct usb_descriptor_header *) &acm_control_interface_desc,
+	(struct usb_descriptor_header *) &acm_header_desc,
+	(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &acm_descriptor,
+	(struct usb_descriptor_header *) &acm_union_desc,
+	(struct usb_descriptor_header *) &acm_hs_notify_desc,
+	(struct usb_descriptor_header *) &acm_data_interface_desc,
+	(struct usb_descriptor_header *) &acm_hs_in_desc,
+	(struct usb_descriptor_header *) &acm_hs_out_desc,
+	NULL,
+};
+
+static struct usb_endpoint_descriptor acm_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor acm_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acm_ss_bulk_comp_desc = {
+	.bLength =              sizeof acm_ss_bulk_comp_desc,
+	.bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_descriptor_header *acm_ss_function[] = {
+	(struct usb_descriptor_header *) &acm_iad_descriptor,
+	(struct usb_descriptor_header *) &acm_control_interface_desc,
+	(struct usb_descriptor_header *) &acm_header_desc,
+	(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &acm_descriptor,
+	(struct usb_descriptor_header *) &acm_union_desc,
+	(struct usb_descriptor_header *) &acm_hs_notify_desc,
+	(struct usb_descriptor_header *) &acm_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &acm_data_interface_desc,
+	(struct usb_descriptor_header *) &acm_ss_in_desc,
+	(struct usb_descriptor_header *) &acm_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &acm_ss_out_desc,
+	(struct usb_descriptor_header *) &acm_ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+#define ACM_CTRL_IDX	0
+#define ACM_DATA_IDX	1
+#define ACM_IAD_IDX	2
+
+/* static strings, in UTF-8 */
+static struct usb_string acm_string_defs[] = {
+	[ACM_CTRL_IDX].s = "CDC Abstract Control Model (ACM)",
+	[ACM_DATA_IDX].s = "CDC ACM Data",
+	[ACM_IAD_IDX ].s = "CDC Serial",
+	{  /* ZEROES END LIST */ },
+};
+
+static struct usb_gadget_strings acm_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		acm_string_defs,
+};
+
+static struct usb_gadget_strings *acm_strings[] = {
+	&acm_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* ACM control ... data handling is delegated to tty library code.
+ * The main task of this function is to activate and deactivate
+ * that code based on device state; track parameters like line
+ * speed, handshake state, and so on; and issue notifications.
+ */
+
+static void acm_complete_set_line_coding(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct f_acm	*acm = ep->driver_data;
+	struct usb_composite_dev *cdev = acm->port.func.config->cdev;
+
+	if (req->status != 0) {
+		DBG(cdev, "acm ttyGS%d completion, err %d\n",
+				acm->port_num, req->status);
+		return;
+	}
+
+	/* normal completion */
+	if (req->actual != sizeof(acm->port_line_coding)) {
+		DBG(cdev, "acm ttyGS%d short resp, len %d\n",
+				acm->port_num, req->actual);
+		usb_ep_set_halt(ep);
+	} else {
+		struct usb_cdc_line_coding	*value = req->buf;
+
+		/* REVISIT:  we currently just remember this data.
+		 * If we change that, (a) validate it first, then
+		 * (b) update whatever hardware needs updating,
+		 * (c) worry about locking.  This is information on
+		 * the order of 9600-8-N-1 ... most of which means
+		 * nothing unless we control a real RS232 line.
+		 */
+		acm->port_line_coding = *value;
+		printk("acm_complete_set_line_coding, datarate:%d, char:%d, parity:%d, databit:%d\n", value->dwDTERate,  value->bCharFormat, value->bParityType, value->bDataBits);
+	}
+}
+
+static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_acm		*acm = func_to_acm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 *
+	 * Note CDC spec table 4 lists the ACM request profile.  It requires
+	 * encapsulated command support ... we don't handle any, and respond
+	 * to them by stalling.  Options include get/set/clear comm features
+	 * (not that useful) and SEND_BREAK.
+	 */
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* SET_LINE_CODING ... just read and save what the host sends */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_LINE_CODING:
+		if (w_length != sizeof(struct usb_cdc_line_coding)
+				|| w_index != acm->ctrl_id)
+			goto invalid;
+
+		value = w_length;
+		cdev->gadget->ep0->driver_data = acm;
+		req->complete = acm_complete_set_line_coding;
+		break;
+
+	/* GET_LINE_CODING ... return what host sent, or initial value */
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_GET_LINE_CODING:
+		if (w_index != acm->ctrl_id)
+			goto invalid;
+
+		value = min_t(unsigned, w_length,
+				sizeof(struct usb_cdc_line_coding));
+		memcpy(req->buf, &acm->port_line_coding, value);
+		break;
+
+	/* SET_CONTROL_LINE_STATE ... save what the host sent */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		if (w_index != acm->ctrl_id)
+			goto invalid;
+
+		value = 0;
+
+		/* FIXME we should not allow data to flow until the
+		 * host sets the ACM_CTRL_DTR bit; and when it clears
+		 * that bit, we should return to that no-flow state.
+		 */
+		acm->port_handshake_bits = w_value;
+		break;
+
+	default:
+invalid:
+		VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "acm ttyGS%d req%02x.%02x v%04x i%04x l%d\n",
+			acm->port_num, ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "acm response on ttyGS%d, err %d\n",
+					acm->port_num, value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_acm		*acm = func_to_acm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret = 0;
+	/* we know alt == 0, so this is an activation or a reset */
+
+	if (intf == acm->ctrl_id) {
+		if (acm->notify->driver_data) {
+			VDBG(cdev, "reset acm control interface %d\n", intf);
+			usb_ep_disable(acm->notify);
+		} else {
+			VDBG(cdev, "init acm ctrl interface %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, acm->notify))
+				return -EINVAL;
+		}
+		ret = usb_ep_enable(acm->notify);
+		if (ret)
+			return ret;
+		acm->notify->driver_data = acm;
+
+	} else if (intf == acm->data_id) {
+		if (acm->port.in->driver_data) {
+			DBG(cdev, "reset acm ttyGS%d\n", acm->port_num);
+			gserial_disconnect(&acm->port);
+		}
+		if (!acm->port.in->desc || !acm->port.out->desc) {
+			DBG(cdev, "activate acm ttyGS%d\n", acm->port_num);
+			if (config_ep_by_speed(cdev->gadget, f,
+					       acm->port.in) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       acm->port.out)) {
+				acm->port.in->desc = NULL;
+				acm->port.out->desc = NULL;
+				return -EINVAL;
+			}
+		}
+		gserial_connect(&acm->port, &acm->port_num);
+
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
+static void acm_disable(struct usb_function *f)
+{
+	struct f_acm	*acm = func_to_acm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	DBG(cdev, "acm ttyGS%d deactivated\n", acm->port_num);
+	gserial_disconnect(&acm->port);
+	usb_ep_disable(acm->notify);
+	acm->notify->driver_data = NULL;
+}
+#ifdef CONFIG_PM
+unsigned int g_acm_suspend_cnt = 0;
+unsigned int g_acm_resume_cnt = 0;
+
+static void acm_suspend(struct usb_function *f)
+{
+	struct f_acm	*acm = func_to_acm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	g_acm_suspend_cnt++;
+	usb_printk("%s, %u, suspendcnt:%u\n", __func__, __LINE__, g_acm_suspend_cnt);
+	USBSTACK_DBG("%s, %u suspendcnt:%u", __func__, __LINE__, g_acm_suspend_cnt);
+}
+
+static void acm_resume(struct usb_function *f)
+{
+	struct f_acm	*acm = func_to_acm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	g_acm_resume_cnt++;
+	usb_printk("%s, %u, resumecnt:%u\n", __func__, __LINE__, g_acm_resume_cnt);
+	USBSTACK_DBG("%s, %u resumecnt:%u", __func__, __LINE__, g_acm_resume_cnt);
+}
+#endif
+/*-------------------------------------------------------------------------*/
+
+/**
+ * acm_cdc_notify - issue CDC notification to host
+ * @acm: wraps host to be notified
+ * @type: notification type
+ * @value: Refer to cdc specs, wValue field.
+ * @data: data to be sent
+ * @length: size of data
+ * Context: irqs blocked, acm->lock held, acm_notify_req non-null
+ *
+ * Returns zero on success or a negative errno.
+ *
+ * See section 6.3.5 of the CDC 1.1 specification for information
+ * about the only notification we issue:  SerialState change.
+ */
+static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value,
+		void *data, unsigned length)
+{
+	struct usb_ep			*ep = acm->notify;
+	struct usb_request		*req;
+	struct usb_cdc_notification	*notify;
+	const unsigned			len = sizeof(*notify) + length;
+	void				*buf;
+	int				status;
+
+	req = acm->notify_req;
+	acm->notify_req = NULL;
+	acm->pending = false;
+
+	req->length = len;
+	notify = req->buf;
+	buf = notify + 1;
+
+	notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	notify->bNotificationType = type;
+	notify->wValue = cpu_to_le16(value);
+	notify->wIndex = cpu_to_le16(acm->ctrl_id);
+	notify->wLength = cpu_to_le16(length);
+	memcpy(buf, data, length);
+
+	/* ep_queue() can complete immediately if it fills the fifo... */
+	spin_unlock(&acm->lock);
+	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+	spin_lock(&acm->lock);
+
+	if (status < 0) {
+		ERROR(acm->port.func.config->cdev,
+				"acm ttyGS%d can't notify serial state, %d\n",
+				acm->port_num, status);
+		acm->notify_req = req;
+	}
+
+	return status;
+}
+
+static int acm_notify_serial_state(struct f_acm *acm)
+{
+	struct usb_composite_dev *cdev = acm->port.func.config->cdev;
+	int			status;
+
+	spin_lock(&acm->lock);
+	if (acm->notify_req) {
+		DBG(cdev, "acm ttyGS%d serial state %04x\n",
+				acm->port_num, acm->serial_state);
+		status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
+				0, &acm->serial_state, sizeof(acm->serial_state));
+	} else {
+		acm->pending = true;
+		status = 0;
+	}
+	spin_unlock(&acm->lock);
+	return status;
+}
+
+static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_acm		*acm = req->context;
+	u8			doit = false;
+
+	/* on this call path we do NOT hold the port spinlock,
+	 * which is why ACM needs its own spinlock
+	 */
+	spin_lock(&acm->lock);
+	if (req->status != -ESHUTDOWN)
+		doit = acm->pending;
+	acm->notify_req = req;
+	spin_unlock(&acm->lock);
+
+	if (doit)
+		acm_notify_serial_state(acm);
+}
+
+/* connect == the TTY link is open */
+
+static void acm_connect(struct gserial *port)
+{
+	struct f_acm		*acm = port_to_acm(port);
+
+	acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+	acm_notify_serial_state(acm);
+}
+
+static void acm_disconnect(struct gserial *port)
+{
+	struct f_acm		*acm = port_to_acm(port);
+
+	acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+	acm_notify_serial_state(acm);
+}
+
+static int acm_send_break(struct gserial *port, int duration)
+{
+	struct f_acm		*acm = port_to_acm(port);
+	u16			state;
+
+	state = acm->serial_state;
+	state &= ~ACM_CTRL_BRK;
+	if (duration)
+		state |= ACM_CTRL_BRK;
+
+	acm->serial_state = state;
+	return acm_notify_serial_state(acm);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ACM function driver setup/binding */
+static int
+acm_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_acm		*acm = func_to_acm(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	acm->ctrl_id = status;
+	acm_iad_descriptor.bFirstInterface = status;
+
+	acm_control_interface_desc.bInterfaceNumber = status;
+	acm_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	acm->data_id = status;
+
+	acm_data_interface_desc.bInterfaceNumber = status;
+	acm_union_desc.bSlaveInterface0 = status;
+	acm_call_mgmt_descriptor.bDataInterface = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc);
+	if (!ep)
+		goto fail;
+	acm->port.in = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc);
+	if (!ep)
+		goto fail;
+	acm->port.out = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	acm->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* allocate notification */
+	acm->notify_req = gs_alloc_req(ep,
+			sizeof(struct usb_cdc_notification) + 2,
+			GFP_KERNEL);
+	if (!acm->notify_req)
+		goto fail;
+
+	acm->notify_req->complete = acm_cdc_notify_complete;
+	acm->notify_req->context = acm;
+
+	/* copy descriptors */
+	f->descriptors = usb_copy_descriptors(acm_fs_function);
+	if (!f->descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		acm_hs_in_desc.bEndpointAddress =
+				acm_fs_in_desc.bEndpointAddress;
+		acm_hs_out_desc.bEndpointAddress =
+				acm_fs_out_desc.bEndpointAddress;
+		acm_hs_notify_desc.bEndpointAddress =
+				acm_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors */
+		f->hs_descriptors = usb_copy_descriptors(acm_hs_function);
+	}
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		acm_ss_in_desc.bEndpointAddress =
+			acm_fs_in_desc.bEndpointAddress;
+		acm_ss_out_desc.bEndpointAddress =
+			acm_fs_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(acm_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			acm->port_num,
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			acm->port.in->name, acm->port.out->name,
+			acm->notify->name);
+	return 0;
+
+fail:
+	if (acm->notify_req)
+		gs_free_req(acm->notify, acm->notify_req);
+
+	/* we might as well release our claims on endpoints */
+	if (acm->notify)
+		acm->notify->driver_data = NULL;
+	if (acm->port.out)
+		acm->port.out->driver_data = NULL;
+	if (acm->port.in)
+		acm->port.in->driver_data = NULL;
+
+	ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
+
+	return status;
+}
+
+static void
+acm_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_acm		*acm = func_to_acm(f);
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	usb_free_descriptors(f->descriptors);
+	gs_free_req(acm->notify, acm->notify_req);
+	kfree(acm);
+}
+
+/* Some controllers can't support CDC ACM ... */
+static inline bool can_support_cdc(struct usb_configuration *c)
+{
+	/* everything else is *probably* fine ... */
+	return true;
+}
+
+/**
+ * acm_bind_config - add a CDC ACM function to a configuration
+ * @c: the configuration to support the CDC ACM instance
+ * @port_num: /dev/ttyGS* port this interface will use
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gserial_setup() with enough ports to
+ * handle all the ones it binds.  Caller is also responsible
+ * for calling @gserial_cleanup() before module unload.
+ */
+int acm_bind_config(struct usb_configuration *c, u8 port_num)
+{
+	struct f_acm	*acm;
+	int		status;
+
+	if (!can_support_cdc(c))
+		return -EINVAL;
+
+	/* REVISIT might want instance-specific strings to help
+	 * distinguish instances ...
+	 */
+
+	/* maybe allocate device-global string IDs, and patch descriptors */
+	if (acm_string_defs[ACM_CTRL_IDX].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		acm_string_defs[ACM_CTRL_IDX].id = status;
+
+		acm_control_interface_desc.iInterface = status;
+
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		acm_string_defs[ACM_DATA_IDX].id = status;
+
+		acm_data_interface_desc.iInterface = status;
+
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		acm_string_defs[ACM_IAD_IDX].id = status;
+
+		acm_iad_descriptor.iFunction = status;
+	}
+
+	/* allocate and initialize one new instance */
+	acm = kzalloc(sizeof *acm, GFP_KERNEL);
+	if (!acm)
+		return -ENOMEM;
+
+	spin_lock_init(&acm->lock);
+
+	acm->port_num = port_num;
+
+	acm->port.connect = acm_connect;
+	acm->port.disconnect = acm_disconnect;
+	acm->port.send_break = acm_send_break;
+
+	acm->port.func.name = "acm";
+	acm->port.func.strings = acm_strings;
+	/* descriptors are per-instance copies */
+	acm->port.func.bind = acm_bind;
+	acm->port.func.unbind = acm_unbind;
+	acm->port.func.set_alt = acm_set_alt;
+	acm->port.func.setup = acm_setup;
+	acm->port.func.disable = acm_disable;
+#if 0 //#ifdef CONFIG_PM
+	acm->port.func.suspend = acm_suspend;
+	acm->port.func.resume = acm_resume;
+#endif
+	status = usb_add_function(c, &acm->port.func);
+	if (status)
+		kfree(acm);
+	return status;
+}
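+
+/*
+ * Illustrative sketch only (not part of this driver): a composite
+ * configuration would typically bind this function roughly as follows,
+ * assuming the standard u_serial API noted in the kernel-doc above.
+ *
+ *	status = gserial_setup(c->cdev->gadget, 1);
+ *	if (status == 0)
+ *		status = acm_bind_config(c, 0);
+ */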
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_adb.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_adb.c
new file mode 100644
index 0000000..17259b7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_adb.c
@@ -0,0 +1,753 @@
+/*
+ * Gadget Driver for Android ADB
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#define ADB_BULK_BUFFER_SIZE           4096
+
+/* number of tx requests to allocate */
+#define TX_REQ_MAX 4
+
+static const char adb_shortname[] = "android_adb";
+
+struct adb_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+
+	int online;
+	int error;
+
+	atomic_t read_excl;
+	atomic_t write_excl;
+	atomic_t open_excl;
+	atomic_t enable_excl;
+
+	struct list_head tx_idle;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	struct usb_request *rx_req;
+	int rx_done;
+#ifdef CONFIG_PM
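+	/* expected to be set while the bus is suspended; adb_write() then
+	 * returns -EBUSY instead of queueing new IN requests */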
+	u32 suspend_state;
+#endif
+};
+
+static struct usb_interface_descriptor adb_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 2,
+	.bInterfaceClass        = 0xFF,
+	.bInterfaceSubClass     = 0x42,
+	.bInterfaceProtocol     = 1,
+};
+
+static struct usb_endpoint_descriptor adb_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor adb_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor adb_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor adb_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_adb_descs[] = {
+	(struct usb_descriptor_header *) &adb_interface_desc,
+	(struct usb_descriptor_header *) &adb_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &adb_fullspeed_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_adb_descs[] = {
+	(struct usb_descriptor_header *) &adb_interface_desc,
+	(struct usb_descriptor_header *) &adb_highspeed_in_desc,
+	(struct usb_descriptor_header *) &adb_highspeed_out_desc,
+	NULL,
+};
+
+static void adb_ready_callback(void);
+static void adb_closed_callback(void);
+
+/* temporary variable used between adb_open() and adb_gadget_bind() */
+static struct adb_dev *_adb_dev;
+
+static inline struct adb_dev *func_to_adb(struct usb_function *f)
+{
+	return container_of(f, struct adb_dev, function);
+}
+
+
+static struct usb_request *adb_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void adb_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
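+/*
+ * adb_lock()/adb_unlock(): simple one-at-a-time exclusion on an atomic
+ * counter: the first caller sees the count reach 1 and wins; concurrent
+ * callers back the increment out and get -1 (mapped to -EBUSY by callers).
+ */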
+static inline int adb_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void adb_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+void adb_req_put(struct adb_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+struct usb_request *adb_req_get(struct adb_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void adb_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct adb_dev *dev = _adb_dev;
+
+	if (req->status != 0)
+		USBSTACK_DBG("adb_complete_in err: %d", req->status);
+	//	dev->error = 1;
+
+	adb_req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void adb_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct adb_dev *dev = _adb_dev;
+
+	dev->rx_done = 1;
+	//if (req->status != 0 && req->status != -ECONNRESET)
+	//	dev->error = 1;
+
+	if (req->status != 0)
+		USBSTACK_DBG("adb_complete_out err: %d", req->status);
+
+	wake_up(&dev->read_wq);
+}
+
+static int adb_create_bulk_endpoints(struct adb_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for adb ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	/* now allocate requests for our endpoints */
+	req = adb_request_new(dev->ep_out, ADB_BULK_BUFFER_SIZE);
+	if (!req)
+		goto fail;
+	req->complete = adb_complete_out;
+	dev->rx_req = req;
+
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = adb_request_new(dev->ep_in, ADB_BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = adb_complete_in;
+		adb_req_put(dev, &dev->tx_idle, req);
+	}
+
+	return 0;
+
+fail:
+	printk(KERN_ERR "adb_bind() could not allocate requests\n");
+	return -ENOMEM;
+}
+
+static ssize_t adb_read(struct file *fp, char __user *buf,
+				size_t count, loff_t *pos)
+{
+	struct adb_dev *dev = fp->private_data;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret;
+
+	pr_debug("adb_read(%d)\n", count);
+	if (!_adb_dev)
+		return -ENODEV;
+
+	if (count > ADB_BULK_BUFFER_SIZE)
+		return -EINVAL;
+
+	if (adb_lock(&dev->read_excl))
+		return -EBUSY;
+
+	/* we will block until we're online */
+	while (!(dev->online || dev->error)) {
+		pr_debug("adb_read: waiting for online state\n");
+		ret = wait_event_interruptible(dev->read_wq,
+				(dev->online || dev->error));
+		if (ret < 0) {
+			adb_unlock(&dev->read_excl);
+			return ret;
+		}
+	}
+	if (dev->error) {
+		r = -EIO;
+		USBSTACK_DBG("adb_read block fail ret:%d", r);
+		goto done;
+	}
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req;
+	req->length = count;
+	dev->rx_done = 0;
+	if(!dev->online){
+		printk("adb_read dev is offline\n");
+		r = -EIO;
+		goto done;
+	}
+	ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC);
+	if (ret < 0) {
+		pr_debug("adb_read: failed to queue req %p (%d)\n", req, ret);
+		r = -EIO;
+		dev->error = 1;
+		USBSTACK_DBG("adb_read ep-queue fail ret:%d", r);
+		goto done;
+	} else {
+		pr_debug("rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		if (ret != -ERESTARTSYS)
+			dev->error = 1;
+		r = ret;
+		usb_ep_dequeue(dev->ep_out, req);
+		USBSTACK_DBG("adb_read stop ret: %d", ret);
+		goto done;
+	}
+	if (!dev->error) {
+		if(!dev->online){
+			printk("adb_read dev is offline cannot requeue req\n");
+			r = -EIO;
+			goto done;
+		}
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		pr_debug("rx %p %d\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+
+	} else {
+		r = -EIO;
+		USBSTACK_DBG("adb_read fail %d", r);
+	}
+
+done:
+	adb_unlock(&dev->read_excl);
+	pr_debug("adb_read returning %d\n", r);
+	return r;
+}
+#ifdef CONFIG_PM
+unsigned int g_dbg_adb_times = 0;
+#endif
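+/*
+ * write(): with CONFIG_PM, a write while the link is suspended fails
+ * with -EBUSY.  Otherwise the user buffer is chunked into
+ * ADB_BULK_BUFFER_SIZE pieces, each copied into an idle tx request and
+ * queued on the IN endpoint, sleeping when no idle request is free.
+ */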
+static ssize_t adb_write(struct file *fp, const char __user *buf,
+				 size_t count, loff_t *pos)
+{
+	struct adb_dev *dev = fp->private_data;
+	struct usb_request *req = 0;
+	int r = count, xfer;
+	int ret;
+
+	if (!_adb_dev)
+		return -ENODEV;
+	//pr_debug("adb_write(%d)\n", count);
+	USBSTACK_DBG("adb write begin");
+
+#ifdef CONFIG_PM
+	if (dev->suspend_state == 1){
+		g_dbg_adb_times++;
+		usb_printk("%s, %u portname:%s, wrtime:%d\n", __func__, __LINE__, dev->function.name, g_dbg_adb_times);
+		USBSTACK_DBG("%s, %u portname:%s, wrtime:%d", __func__, __LINE__, dev->function.name, g_dbg_adb_times);
+		return -EBUSY;
+#if 0
+        usb_gadget_wakeup(dev->cdev->gadget);
+		do{
+			msleep(2);
+		}while(dev->suspend_state==1);
+#endif
+	}
+#endif
+	if (adb_lock(&dev->write_excl))
+		return -EBUSY;
+	while (count > 0) {
+		if (dev->error) {
+			pr_debug("adb_write dev->error\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			(req = adb_req_get(dev, &dev->tx_idle)) || dev->error);
+
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+		if (dev->error) {
+			printk("adb_write: device error, aborting write\n");
+			r = -EIO;
+			break;
+		}
+
+		if (req != 0) {
+			if (count > ADB_BULK_BUFFER_SIZE)
+				xfer = ADB_BULK_BUFFER_SIZE;
+			else
+				xfer = count;
+			if (copy_from_user(req->buf, buf, xfer)) {
+				r = -EFAULT;
+				break;
+			}
+
+			req->length = xfer;
+			ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC);
+			if (ret < 0) {
+				pr_debug("adb_write: xfer error %d\n", ret);
+				dev->error = 1;
+				r = -EIO;
+				break;
+			}
+
+			buf += xfer;
+			count -= xfer;
+
+			/* zero this so we don't try to free it on error exit */
+			req = 0;
+		}
+	}
+
+	if (req)
+		adb_req_put(dev, &dev->tx_idle, req);
+
+	adb_unlock(&dev->write_excl);
+	pr_debug("adb_write returning %d\n", r);
+	return r;
+}
+
+static int adb_open(struct inode *ip, struct file *fp)
+{
+	//pr_info("adb_open\n");
+	if (!_adb_dev)
+		return -ENODEV;
+	USBSTACK_DBG("%s", __func__);
+	if (adb_lock(&_adb_dev->open_excl))
+		return -EBUSY;
+
+	fp->private_data = _adb_dev;
+
+	/* clear the error latch */
+	_adb_dev->error = 0;
+
+	//adb_ready_callback();
+
+	if(atomic_read(&_adb_dev->enable_excl)){
+		_adb_dev->online = 1;
+		wake_up(&_adb_dev->read_wq);
+	}
+
+	return 0;
+}
+
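+/*
+ * release(): latch the error flag; if a writer is still waiting for an
+ * in-flight IN request, or a reader is blocked on the OUT endpoint,
+ * bounce the corresponding endpoint (disable + re-enable) so the
+ * pending transfer completes and the waiter can exit.
+ */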
+static int adb_release(struct inode *ip, struct file *fp)
+{
+	int ret;
+	//pr_info("adb_release\n");
+	USBSTACK_DBG("%s", __func__);
+	printk("adb_release\n");
+	//adb_closed_callback();
+
+	_adb_dev->error = 1;
+	if(list_empty(&_adb_dev->tx_idle) && atomic_read(&_adb_dev->write_excl)){
+		printk("adb_release, disable and reenable endpoint\n");
+		ret = usb_ep_disable(_adb_dev->ep_in);
+		if(ret){
+			printk("adb_release, usb_ep_disable fail,%d\n", ret);
+			WARN_ON(1);
+		}
+		ret = usb_ep_enable(_adb_dev->ep_in);
+		if(ret){
+			printk("adb_release, usb_ep_enable fail,%d\n", ret);
+			WARN_ON(1);
+		}		
+	}
+	if(atomic_read(&_adb_dev->read_excl)){		
+		printk("adb_release, disable and reenable endpoint\n");
+		ret = usb_ep_disable(_adb_dev->ep_out);
+		if(ret){
+			printk("adb_release, usb_ep_disable fail,%d\n", ret);
+			WARN_ON(1);
+		}
+		ret = usb_ep_enable(_adb_dev->ep_out);
+		if(ret){
+			printk("adb_release, usb_ep_enable fail,%d\n", ret);
+			WARN_ON(1);
+		}
+
+	}
+	adb_unlock(&_adb_dev->open_excl);
+	return 0;
+}
+
+/* file operations for ADB device /dev/android_adb */
+static const struct file_operations adb_fops = {
+	.owner = THIS_MODULE,
+	.read = adb_read,
+	.write = adb_write,
+	.open = adb_open,
+	.release = adb_release,
+};
+
+static struct miscdevice adb_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = adb_shortname,
+	.fops = &adb_fops,
+};
+
+static int
+adb_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct adb_dev	*dev = func_to_adb(f);
+	int			id;
+	int			ret;
+	USBSTACK_DBG("%s", __func__);
+	dev->cdev = cdev;
+	DBG(cdev, "adb_function_bind dev: %p\n", dev);
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	adb_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = adb_create_bulk_endpoints(dev, &adb_fullspeed_in_desc,
+			&adb_fullspeed_out_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		adb_highspeed_in_desc.bEndpointAddress =
+			adb_fullspeed_in_desc.bEndpointAddress;
+		adb_highspeed_out_desc.bEndpointAddress =
+			adb_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static void
+adb_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct adb_dev	*dev = func_to_adb(f);
+	struct usb_request *req;
+
+	USBSTACK_DBG("%s", __func__);
+	dev->online = 0;
+	dev->error = 1;
+
+	wake_up(&dev->read_wq);
+
+	adb_request_free(dev->rx_req, dev->ep_out);
+	while ((req = adb_req_get(dev, &dev->tx_idle)))
+		adb_request_free(req, dev->ep_in);
+}
+
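+/*
+ * set_alt(): (re)configure both bulk endpoints for the current
+ * connection speed and enable them; if the device node is already
+ * open, mark the function online and wake any blocked reader.
+ */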
+static int adb_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct adb_dev	*dev = func_to_adb(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "adb_function_set_alt intf: %d alt: %d\n", intf, alt);
+	USBSTACK_DBG("%s", __func__);
+	#ifdef CONFIG_PM
+	if(dev->suspend_state == 1)
+		dev->suspend_state = 0;
+	#endif
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_out);
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	if(adb_lock(&dev->enable_excl))
+		return 0;
+		
+	if(atomic_read(&dev->open_excl)==1){
+		dev->online = 1;
+		dev->error= 0;		
+		/* readers may be blocked waiting for us to go online */
+		wake_up(&dev->read_wq);
+	}
+	
+	return 0;
+}
+
+static void adb_function_disable(struct usb_function *f)
+{
+	struct adb_dev	*dev = func_to_adb(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	USBSTACK_DBG("%s", __func__);
+
+	if(atomic_read(&dev->enable_excl)==0)
+		return;
+	else
+		adb_unlock(&dev->enable_excl);
+
+#if 0
+	if(dev->suspend_state == 1)
+		return;
+#endif
+
+	DBG(cdev, "adb_function_disable cdev %p\n", cdev);
+	dev->online = 0;
+	//dev->error = 1;
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+#ifdef CONFIG_PM
+unsigned int g_adb_suspend_cnt = 0;
+unsigned int g_adb_resume_cnt = 0;
+static void adb_function_suspend(struct usb_function *f)
+{
+	struct adb_dev	*dev = func_to_adb(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+    g_adb_suspend_cnt++;
+	usb_printk("%s, %u, suspendcnt:%d\n", __func__, __LINE__, g_adb_suspend_cnt);
+    USBSTACK_DBG("%s, %u suspendcnt:%d", __func__, __LINE__, g_adb_suspend_cnt);
+	dev->suspend_state = 1;
+//	adb_function_disable(f);
+	usb_ep_disable(dev->ep_in);
+//	usb_ep_disable(dev->ep_out);
+}
+static void adb_function_resume(struct usb_function *f)
+{
+	struct adb_dev	*dev = func_to_adb(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+    g_adb_resume_cnt++;
+	usb_printk("%s, %u, resumecnt:%d\n", __func__, __LINE__, g_adb_resume_cnt);
+    USBSTACK_DBG("%s, %u resumecnt:%d", __func__, __LINE__, g_adb_resume_cnt);
+//	adb_function_set_alt(f, NULL, NULL);
+	//usb_ep_enable(dev->ep_in);
+	usb_ep_resume_enable(dev->ep_in);
+//	usb_ep_enable(dev->ep_out);
+	dev->suspend_state = 0;
+}
+#endif
+static int adb_bind_config(struct usb_configuration *c)
+{
+	struct adb_dev *dev = _adb_dev;
+
+	usb_printk("adb_bind_config\n");
+
+	dev->cdev = c->cdev;
+	dev->function.name = "adb";
+	dev->function.descriptors = fs_adb_descs;
+	dev->function.hs_descriptors = hs_adb_descs;
+	dev->function.bind = adb_function_bind;
+	dev->function.unbind = adb_function_unbind;
+	dev->function.set_alt = adb_function_set_alt;
+	dev->function.disable = adb_function_disable;
+#ifdef CONFIG_PM
+    dev->function.suspend = adb_function_suspend;
+    dev->function.resume = adb_function_resume;
+#endif
+	return usb_add_function(c, &dev->function);
+}
+
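+/*
+ * adb_setup() allocates the singleton adb_dev, initializes its lock,
+ * wait queues and request list, and registers the /dev/android_adb
+ * misc device; adb_cleanup() undoes both.
+ */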
+static int adb_setup(void)
+{
+	struct adb_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+
+	atomic_set(&dev->open_excl, 0);
+	atomic_set(&dev->read_excl, 0);
+	atomic_set(&dev->write_excl, 0);
+	atomic_set(&dev->enable_excl, 0);
+
+	INIT_LIST_HEAD(&dev->tx_idle);
+
+	_adb_dev = dev;
+
+	ret = misc_register(&adb_device);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(dev);
+	printk(KERN_ERR "adb gadget driver failed to initialize\n");
+	return ret;
+}
+
+static void adb_cleanup(void)
+{
+	misc_deregister(&adb_device);
+
+	kfree(_adb_dev);
+	_adb_dev = NULL;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_audio_source.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_audio_source.c
new file mode 100644
index 0000000..c757409
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_audio_source.c
@@ -0,0 +1,828 @@
+/*
+ * Gadget Function Driver for USB audio source device
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/usb/audio.h>
+#include <linux/wait.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+
+#define SAMPLE_RATE 44100
+#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000)
+
+#define IN_EP_MAX_PACKET_SIZE 384
+
+/* Number of requests to allocate */
+#define IN_EP_REQ_COUNT 4
+
+#define AUDIO_AC_INTERFACE	0
+#define AUDIO_AS_INTERFACE	1
+#define AUDIO_NUM_INTERFACES	2
+
+/* B.3.1  Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+
+#define UAC_DT_AC_HEADER_LENGTH	UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+	+ UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \
+	+ UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2  Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+	.bLength =		UAC_DT_AC_HEADER_LENGTH,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_HEADER,
+	.bcdADC =		__constant_cpu_to_le16(0x0100),
+	.wTotalLength =		__constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+	.bInCollection =	AUDIO_NUM_INTERFACES,
+	.baInterfaceNr = {
+		[0] =		AUDIO_AC_INTERFACE,
+		[1] =		AUDIO_AS_INTERFACE,
+	}
+};
+
+#define INPUT_TERMINAL_ID	1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+	.bLength =		UAC_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_INPUT_TERMINAL,
+	.bTerminalID =		INPUT_TERMINAL_ID,
+	.wTerminalType =	UAC_INPUT_TERMINAL_MICROPHONE,
+	.bAssocTerminal =	0,
+	.wChannelConfig =	0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID		2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+	.bLength		= UAC_DT_FEATURE_UNIT_SIZE(0),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_FEATURE_UNIT,
+	.bUnitID		= FEATURE_UNIT_ID,
+	.bSourceID		= INPUT_TERMINAL_ID,
+	.bControlSize		= 2,
+};
+
+#define OUTPUT_TERMINAL_ID	3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+	.bLength		= UAC_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_OUTPUT_TERMINAL,
+	.bTerminalID		= OUTPUT_TERMINAL_ID,
+	.wTerminalType		= UAC_TERMINAL_STREAMING,
+	.bAssocTerminal		= FEATURE_UNIT_ID,
+	.bSourceID		= FEATURE_UNIT_ID,
+};
+
+/* B.4.1  Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2  Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+	.bLength =		UAC_DT_AS_HEADER_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_AS_GENERAL,
+	.bTerminalLink =	INPUT_TERMINAL_ID,
+	.bDelay =		1,
+	.wFormatTag =		UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+	.bLength =		UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_FORMAT_TYPE,
+	.bFormatType =		UAC_FORMAT_TYPE_I,
+	.bSubframeSize =	2,
+	.bBitResolution =	16,
+	.bSamFreqType =		1,
+};
+
+/* Standard ISO IN Endpoint Descriptor for highspeed */
+static struct usb_endpoint_descriptor hs_as_in_ep_desc  = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_SYNC_SYNC
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize =	__constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+	.bInterval =		4, /* poll 1 per millisecond */
+};
+
+/* Standard ISO IN Endpoint Descriptor for fullspeed */
+static struct usb_endpoint_descriptor fs_as_in_ep_desc  = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_SYNC_SYNC
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize =	__constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+	.bInterval =		1, /* poll 1 per millisecond */
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+	.bLength =		UAC_ISO_ENDPOINT_DESC_SIZE,
+	.bDescriptorType =	USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype =	UAC_EP_GENERAL,
+	.bmAttributes =		1,
+	.bLockDelayUnits =	1,
+	.wLockDelay =		__constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *hs_audio_desc[] = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header *)&output_terminal_desc,
+	(struct usb_descriptor_header *)&feature_unit_desc,
+
+	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_header_desc,
+
+	(struct usb_descriptor_header *)&as_type_i_desc,
+
+	(struct usb_descriptor_header *)&hs_as_in_ep_desc,
+	(struct usb_descriptor_header *)&as_iso_in_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *fs_audio_desc[] = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header *)&output_terminal_desc,
+	(struct usb_descriptor_header *)&feature_unit_desc,
+
+	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_header_desc,
+
+	(struct usb_descriptor_header *)&as_type_i_desc,
+
+	(struct usb_descriptor_header *)&fs_as_in_ep_desc,
+	(struct usb_descriptor_header *)&as_iso_in_desc,
+	NULL,
+};
+
+static struct snd_pcm_hardware audio_hw_info = {
+	.info =			SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_BATCH |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER,
+
+	.formats		= SNDRV_PCM_FMTBIT_S16_LE,
+	.channels_min		= 2,
+	.channels_max		= 2,
+	.rate_min		= SAMPLE_RATE,
+	.rate_max		= SAMPLE_RATE,
+
+	.buffer_bytes_max =	1024 * 1024,
+	.period_bytes_min =	64,
+	.period_bytes_max =	512 * 1024,
+	.periods_min =		2,
+	.periods_max =		1024,
+};
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_config {
+	int	card;
+	int	device;
+};
+
+struct audio_dev {
+	struct usb_function		func;
+	struct snd_card			*card;
+	struct snd_pcm			*pcm;
+	struct snd_pcm_substream *substream;
+
+	struct list_head		idle_reqs;
+	struct usb_ep			*in_ep;
+
+	spinlock_t			lock;
+
+	/* beginning, end and current position in our buffer */
+	void				*buffer_start;
+	void				*buffer_end;
+	void				*buffer_pos;
+
+	/* byte size of a "period" */
+	unsigned int			period;
+	/* bytes sent since last call to snd_pcm_period_elapsed */
+	unsigned int			period_offset;
+	/* time we started playing */
+	ktime_t				start_time;
+	/* number of frames sent since start_time */
+	s64				frames_sent;
+};
+
+static inline struct audio_dev *func_to_audio(struct usb_function *f)
+{
+	return container_of(f, struct audio_dev, func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+	req->length = buffer_size;
+	return req;
+}
+
+static void audio_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+static void audio_req_put(struct audio_dev *audio, struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	list_add_tail(&req->list, &audio->idle_reqs);
+	spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static struct usb_request *audio_req_get(struct audio_dev *audio)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	if (list_empty(&audio->idle_reqs)) {
+		req = 0;
+	} else {
+		req = list_first_entry(&audio->idle_reqs, struct usb_request,
+				list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&audio->lock, flags);
+	return req;
+}
+
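+/*
+ * Pacing: frames owed = elapsed ms since playback start * FRAMES_PER_MSEC,
+ * minus frames already sent.  If we fall more than 10 ms behind, frames
+ * are dropped rather than sent in a burst, and at least one millisecond
+ * of audio is always queued to keep the pipeline moving.  Each idle
+ * request carries up to IN_EP_MAX_PACKET_SIZE bytes copied out of the
+ * circular ALSA buffer.
+ */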
+/* send the appropriate number of packets to match our bitrate */
+static void audio_send(struct audio_dev *audio)
+{
+	struct snd_pcm_runtime *runtime;
+	struct usb_request *req;
+	int length, length1, length2, ret;
+	s64 msecs;
+	s64 frames;
+	ktime_t now;
+
+	/* audio->substream will be null if we have been closed */
+	if (!audio->substream)
+		return;
+	/* audio->buffer_pos will be null if we have been stopped */
+	if (!audio->buffer_pos)
+		return;
+
+	runtime = audio->substream->runtime;
+
+	/* compute number of frames to send */
+	now = ktime_get();
+	msecs = ktime_to_ns(now) - ktime_to_ns(audio->start_time);
+	do_div(msecs, 1000000);
+	frames = msecs * SAMPLE_RATE;
+	do_div(frames, 1000);
+
+	/* Readjust our frames_sent if we fall too far behind.
+	 * If we get too far behind it is better to drop some frames than
+	 * to keep sending data too fast in an attempt to catch up.
+	 */
+	if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC)
+		audio->frames_sent = frames - FRAMES_PER_MSEC;
+
+	frames -= audio->frames_sent;
+
+	/* We need to send something to keep the pipeline going */
+	if (frames <= 0)
+		frames = FRAMES_PER_MSEC;
+
+	while (frames > 0) {
+		req = audio_req_get(audio);
+		if (!req)
+			break;
+
+		length = frames_to_bytes(runtime, frames);
+		if (length > IN_EP_MAX_PACKET_SIZE)
+			length = IN_EP_MAX_PACKET_SIZE;
+
+		if (audio->buffer_pos + length > audio->buffer_end)
+			length1 = audio->buffer_end - audio->buffer_pos;
+		else
+			length1 = length;
+		memcpy(req->buf, audio->buffer_pos, length1);
+		if (length1 < length) {
+			/* Wrap around and copy remaining length
+			 * at beginning of buffer.
+			 */
+			length2 = length - length1;
+			memcpy(req->buf + length1, audio->buffer_start,
+					length2);
+			audio->buffer_pos = audio->buffer_start + length2;
+		} else {
+			audio->buffer_pos += length1;
+			if (audio->buffer_pos >= audio->buffer_end)
+				audio->buffer_pos = audio->buffer_start;
+		}
+
+		req->length = length;
+		ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
+		if (ret < 0) {
+			pr_err("usb_ep_queue failed ret: %d\n", ret);
+			audio_req_put(audio, req);
+			break;
+		}
+
+		frames -= bytes_to_frames(runtime, length);
+		audio->frames_sent += bytes_to_frames(runtime, length);
+	}
+}
+
+static void audio_control_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	/* nothing to do here */
+}
+
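+/*
+ * ISO IN completion: recycle the request, account its bytes toward the
+ * current ALSA period, call snd_pcm_period_elapsed() once a full period
+ * has gone out, and immediately top the endpoint back up.
+ */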
+static void audio_data_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct audio_dev *audio = req->context;
+
+	pr_debug("audio_data_complete req->status %d req->actual %d\n",
+		req->status, req->actual);
+
+	audio_req_put(audio, req);
+
+	if (!audio->buffer_start || req->status)
+		return;
+
+	audio->period_offset += req->actual;
+	if (audio->period_offset >= audio->period) {
+		snd_pcm_period_elapsed(audio->substream);
+		audio->period_offset = 0;
+	}
+	audio_send(audio);
+}
+
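+/*
+ * UAC class requests on the audio endpoints: SET_CUR/MIN/MAX/RES are
+ * accepted but ignored, and the GET_* variants report the single fixed
+ * sample rate (SAMPLE_RATE) as a 3-byte little-endian value.
+ */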
+static int audio_set_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	int value = -EOPNOTSUPP;
+	u16 ep = le16_to_cpu(ctrl->wIndex);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+
+	pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	switch (ctrl->bRequest) {
+	case UAC_SET_CUR:
+	case UAC_SET_MIN:
+	case UAC_SET_MAX:
+	case UAC_SET_RES:
+		value = len;
+		break;
+	default:
+		break;
+	}
+
+	return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int value = -EOPNOTSUPP;
+	u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u8 *buf = cdev->req->buf;
+
+	pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	if (w_value == UAC_EP_CS_ATTR_SAMPLE_RATE << 8) {
+		switch (ctrl->bRequest) {
+		case UAC_GET_CUR:
+		case UAC_GET_MIN:
+		case UAC_GET_MAX:
+		case UAC_GET_RES:
+			/* return our sample rate */
+			buf[0] = (u8)SAMPLE_RATE;
+			buf[1] = (u8)(SAMPLE_RATE >> 8);
+			buf[2] = (u8)(SAMPLE_RATE >> 16);
+			value = 3;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return value;
+}
+
+static int
+audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request *req = cdev->req;
+	int value = -EOPNOTSUPP;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything; interface
+	 * activation uses set_alt().
+	 */
+	switch (ctrl->bRequestType) {
+	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_set_endpoint_req(f, ctrl);
+		break;
+
+	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_get_endpoint_req(f, ctrl);
+		break;
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		pr_debug("audio req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		req->complete = audio_control_complete;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("audio response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct audio_dev *audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
+	if (ret)
+		return ret;
+
+	usb_ep_enable(audio->in_ep);
+	return 0;
+}
+
+static void audio_disable(struct usb_function *f)
+{
+	struct audio_dev	*audio = func_to_audio(f);
+
+	pr_debug("audio_disable\n");
+	usb_ep_disable(audio->in_ep);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void audio_build_desc(struct audio_dev *audio)
+{
+	u8 *sam_freq;
+	int rate;
+
+	/* Set channel numbers */
+	input_terminal_desc.bNrChannels = 2;
+	as_type_i_desc.bNrChannels = 2;
+
+	/* Set sample rates */
+	rate = SAMPLE_RATE;
+	sam_freq = as_type_i_desc.tSamFreq[0];
+	memcpy(sam_freq, &rate, 3);
+}
+
+/* audio function driver setup/binding */
+static int
+audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct audio_dev *audio = func_to_audio(f);
+	int status;
+	struct usb_ep *ep;
+	struct usb_request *req;
+	int i;
+
+	audio_build_desc(audio);
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ac_interface_desc.bInterfaceNumber = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	as_interface_alt_0_desc.bInterfaceNumber = status;
+	as_interface_alt_1_desc.bInterfaceNumber = status;
+
+	status = -ENODEV;
+
+	/* allocate our endpoint */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc);
+	if (!ep)
+		goto fail;
+	audio->in_ep = ep;
+	ep->driver_data = audio; /* claim */
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		hs_as_in_ep_desc.bEndpointAddress =
+			fs_as_in_ep_desc.bEndpointAddress;
+
+	f->descriptors = fs_audio_desc;
+	f->hs_descriptors = hs_audio_desc;
+
+	for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
+		req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
+		if (req) {
+			req->context = audio;
+			req->complete = audio_data_complete;
+			audio_req_put(audio, req);
+		} else
+			status = -ENOMEM;
+	}
+
+fail:
+	return status;
+}
+
+static void
+audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct audio_dev *audio = func_to_audio(f);
+	struct usb_request *req;
+
+	while ((req = audio_req_get(audio)))
+		audio_request_free(req, audio->in_ep);
+
+	snd_card_free_when_closed(audio->card);
+	audio->card = NULL;
+	audio->pcm = NULL;
+	audio->substream = NULL;
+	audio->in_ep = NULL;
+}
+
+static void audio_pcm_playback_start(struct audio_dev *audio)
+{
+	audio->start_time = ktime_get();
+	audio->frames_sent = 0;
+	audio_send(audio);
+}
+
+static void audio_pcm_playback_stop(struct audio_dev *audio)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	audio->buffer_start = 0;
+	audio->buffer_end = 0;
+	audio->buffer_pos = 0;
+	spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static int audio_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = substream->private_data;
+
+	runtime->private_data = audio;
+	runtime->hw = audio_hw_info;
+	snd_pcm_limit_hw_rates(runtime);
+	runtime->hw.channels_max = 2;
+
+	audio->substream = substream;
+	return 0;
+}
+
+static int audio_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct audio_dev *audio = substream->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	audio->substream = NULL;
+	spin_unlock_irqrestore(&audio->lock, flags);
+
+	return 0;
+}
+
+static int audio_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	unsigned int channels = params_channels(params);
+	unsigned int rate = params_rate(params);
+
+	if (rate != SAMPLE_RATE)
+		return -EINVAL;
+	if (channels != 2)
+		return -EINVAL;
+
+	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+		params_buffer_bytes(params));
+}
+
+static int audio_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
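+/*
+ * prepare: snapshot the vmalloc'ed PCM buffer bounds and the period
+ * size; audio_send() walks buffer_pos through this region and wraps at
+ * buffer_end.
+ */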
+static int audio_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = runtime->private_data;
+
+	audio->period = snd_pcm_lib_period_bytes(substream);
+	audio->period_offset = 0;
+	audio->buffer_start = runtime->dma_area;
+	audio->buffer_end = audio->buffer_start
+		+ snd_pcm_lib_buffer_bytes(substream);
+	audio->buffer_pos = audio->buffer_start;
+
+	return 0;
+}
+
+static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = runtime->private_data;
+	ssize_t bytes = audio->buffer_pos - audio->buffer_start;
+
+	/* return offset of next frame to fill in our buffer */
+	return bytes_to_frames(runtime, bytes);
+}
+
+static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	struct audio_dev *audio = substream->runtime->private_data;
+	int ret = 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		audio_pcm_playback_start(audio);
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		audio_pcm_playback_stop(audio);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct audio_dev _audio_dev = {
+	.func = {
+		.name = "audio_source",
+		.bind = audio_bind,
+		.unbind = audio_unbind,
+		.set_alt = audio_set_alt,
+		.setup = audio_setup,
+		.disable = audio_disable,
+	},
+	.lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock),
+	.idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs),
+};
+
+static struct snd_pcm_ops audio_playback_ops = {
+	.open		= audio_pcm_open,
+	.close		= audio_pcm_close,
+	.ioctl		= snd_pcm_lib_ioctl,
+	.hw_params	= audio_pcm_hw_params,
+	.hw_free	= audio_pcm_hw_free,
+	.prepare	= audio_pcm_prepare,
+	.trigger	= audio_pcm_playback_trigger,
+	.pointer	= audio_pcm_pointer,
+};
+
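+/*
+ * Create the ALSA card and a playback-only PCM, register them, then add
+ * the USB function to the configuration.  The resulting card and device
+ * numbers are reported back through audio_source_config so userspace
+ * can locate the corresponding ALSA device.
+ */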
+int audio_source_bind_config(struct usb_configuration *c,
+		struct audio_source_config *config)
+{
+	struct audio_dev *audio;
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+	int err;
+
+	config->card = -1;
+	config->device = -1;
+
+	audio = &_audio_dev;
+
+	err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+			THIS_MODULE, 0, &card);
+	if (err)
+		return err;
+
+	snd_card_set_dev(card, &c->cdev->gadget->dev);
+
+	err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm);
+	if (err)
+		goto pcm_fail;
+	pcm->private_data = audio;
+	pcm->info_flags = 0;
+	audio->pcm = pcm;
+
+	strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name));
+
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops);
+	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+				NULL, 0, 64 * 1024);
+
+	strlcpy(card->driver, "audio_source", sizeof(card->driver));
+	strlcpy(card->shortname, card->driver, sizeof(card->shortname));
+	strlcpy(card->longname, "USB accessory audio source",
+		sizeof(card->longname));
+
+	err = snd_card_register(card);
+	if (err)
+		goto register_fail;
+
+	err = usb_add_function(c, &audio->func);
+	if (err)
+		goto add_fail;
+
+	config->card = pcm->card->number;
+	config->device = pcm->device;
+	audio->card = card;
+	return 0;
+
+add_fail:
+register_fail:
+pcm_fail:
+	snd_card_free(card);
+	return err;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_diag.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_diag.c
new file mode 100644
index 0000000..4875994
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_diag.c
@@ -0,0 +1,367 @@
+/*
+ * f_diag.c - USB diag function driver
+ *
+ * Copyright (C) 2019 by ZTE Corporation
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+#include "u_diag.h"
+#include "gadget_chips.h"
+
+
+/*
+ * This function packages a simple "diag" port with no real
+ * control mechanisms, just raw data transfer over two bulk endpoints.
+ *
+ * Because it's not standardized, this isn't as interoperable as the
+ * CDC ACM driver.  However, for many purposes it's just as functional
+ * if you can arrange appropriate host side drivers.
+ */
+struct f_diag {
+	struct u_diag   port;
+	u8				data_id;
+	u8				port_num;
+};
+
+static inline struct f_diag *func_to_diag(struct usb_function *f)
+{
+	return container_of(f, struct f_diag, port.func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor diag_interface_desc  = {
+
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_SUBCLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor diag_fs_in_desc  = {
+
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor diag_fs_out_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *diag_fs_function[] = {
+	(struct usb_descriptor_header *) &diag_interface_desc,
+	(struct usb_descriptor_header *) &diag_fs_in_desc,
+	(struct usb_descriptor_header *) &diag_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor diag_hs_in_desc  = {
+
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor diag_hs_out_desc  = {
+
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *diag_hs_function[]  = {
+
+	(struct usb_descriptor_header *) &diag_interface_desc,
+	(struct usb_descriptor_header *) &diag_hs_in_desc,
+	(struct usb_descriptor_header *) &diag_hs_out_desc,
+	NULL,
+};
+
+static struct usb_endpoint_descriptor diag_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor diag_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor diag_ss_bulk_comp_desc = {
+	.bLength =              sizeof diag_ss_bulk_comp_desc,
+	.bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_descriptor_header *diag_ss_function[] = {
+	(struct usb_descriptor_header *) &diag_interface_desc,
+	(struct usb_descriptor_header *) &diag_ss_in_desc,
+	(struct usb_descriptor_header *) &diag_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &diag_ss_out_desc,
+	(struct usb_descriptor_header *) &diag_ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string diag_string_defs[] = {
+	[0].s = "Diag",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings diag_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		diag_string_defs,
+};
+
+static struct usb_gadget_strings *diag_strings[] = {
+	&diag_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
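+/*
+ * set_alt(): a set-interface from the host is either an activation or a
+ * reset.  An already-connected port is torn down first, the bulk
+ * endpoints are configured for the current speed if needed, and the
+ * diag port layer is (re)connected.
+ */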
+static int diag_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_diag		*diag = func_to_diag(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+#ifdef CONFIG_PM
+	if(diag->port.suspend_state == 1)
+		diag->port.suspend_state = 0;
+#endif	
+	/* we know alt == 0, so this is an activation or a reset */
+
+	if (diag->port.in->driver_data) {
+		diag_disconnect(&diag->port);
+	}
+	if (!diag->port.in->desc || !diag->port.out->desc) {
+		if (config_ep_by_speed(cdev->gadget, f, diag->port.in) ||
+		    config_ep_by_speed(cdev->gadget, f, diag->port.out)) {
+			diag->port.in->desc = NULL;
+			diag->port.out->desc = NULL;
+			return -EINVAL;
+		}
+	}
+	diag_connect(&diag->port, diag->port_num);
+	return 0;
+}
+
+static void diag_disable(struct usb_function *f)
+{
+	struct f_diag	*diag = func_to_diag(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+#if 0
+	if(diag->port.suspend_state == 1)
+		return;
+#endif
+	diag_disconnect(&diag->port);
+}
+
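+/*
+ * Bus suspend/resume: on suspend the port is notified through
+ * diag_disconnect_ext() and the IN endpoint is disabled; on resume the
+ * IN endpoint is brought back with usb_ep_resume_enable() and the port
+ * is told it is connected again.
+ */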
+#ifdef CONFIG_PM
+unsigned int g_diag_suspend_cnt = 0;
+unsigned int g_diag_resume_cnt = 0;
+static void diag_suspend(struct usb_function *f)
+{
+	struct f_diag	*diag = func_to_diag(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+    g_diag_suspend_cnt++;
+	usb_printk( "%s, %u, suspendcnt:%d\n", __func__, __LINE__, g_diag_suspend_cnt);
+	USBSTACK_DBG("%s, %u suspendcnt:%d", __func__, __LINE__, g_diag_suspend_cnt);
+//	diag_disable(f);
+ 	//diagial_disconnect_ext(&diag->port);
+    diag->port.suspend_state = 1;
+	diag_disconnect_ext(&diag->port);
+	
+    usb_ep_disable((&diag->port)->in);
+}
+static void diag_resume(struct usb_function *f)
+{
+	struct f_diag	*diag = func_to_diag(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+    g_diag_resume_cnt++;
+	usb_printk("%s, %u, resumecnt:%d\n", __func__, __LINE__, g_diag_resume_cnt);
+	USBSTACK_DBG("%s, %u resumecnt:%d", __func__, __LINE__, g_diag_resume_cnt);
+//	diag_set_alt(f, NULL, NULL);
+	//usb_ep_enable((&diag->port)->in);
+	usb_ep_resume_enable((&diag->port)->in);
+//    usb_ep_enable((&diag->port)->out);
+ 	//diagial_connect_ext(&diag->port);
+	diag->port.suspend_state = 0;
+	diag_connect_ext(&diag->port);
+
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* serial function driver setup/binding */
+
+//static int __init
+static int 
+diag_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_diag		*diag = func_to_diag(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	diag->data_id = status;
+	diag_interface_desc.bInterfaceNumber = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &diag_fs_in_desc);
+	if (!ep)
+		goto fail;
+	diag->port.in = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &diag_fs_out_desc);
+	if (!ep)
+		goto fail;
+	diag->port.out = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(diag_fs_function);
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		diag_hs_in_desc.bEndpointAddress =
+				diag_fs_in_desc.bEndpointAddress;
+		diag_hs_out_desc.bEndpointAddress =
+				diag_fs_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(diag_hs_function);
+	}
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		diag_ss_in_desc.bEndpointAddress =
+			diag_fs_in_desc.bEndpointAddress;
+		diag_ss_out_desc.bEndpointAddress =
+			diag_fs_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(diag_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	/* we might as well release our claims on endpoints */
+	if (diag->port.out)
+		diag->port.out->driver_data = NULL;
+	if (diag->port.in)
+		diag->port.in->driver_data = NULL;
+
+	return status;
+}
+
+static void
+diag_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	usb_free_descriptors(f->descriptors);
+	kfree(func_to_diag(f));
+}
+
+/**
+ * diag_bind_config - add a diag function to a configuration
+ * @c: the configuration to support the diag instance
+ * @port_num: which diag port this function instance connects to
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have set up the diag port layer with enough ports to
+ * handle all the ones it binds.  Caller is also responsible for
+ * tearing the diag port layer down before module unload.
+ */
+//int __init diag_bind_config(struct usb_configuration *c, u8 port_num)
+int diag_bind_config(struct usb_configuration *c, u8 port_num)
+{
+	struct f_diag	*diag;
+	int		status;
+
+	/* REVISIT might want instance-specific strings to help
+	 * distinguish instances ...
+	 */
+	 USB_DEBUG("PORT_NUM:%d", port_num);
+
+	/* maybe allocate device-global string ID */
+	if (diag_string_defs[0].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		diag_string_defs[0].id = status;
+	}
+
+	USB_DEBUG("MALLOC before");
+
+	/* allocate and initialize one new instance */
+	diag = kzalloc(sizeof *diag, GFP_KERNEL);
+	if (!diag)
+		return -ENOMEM;
+	USB_DEBUG("MALLOC end");
+
+	diag->port_num = port_num;
+
+	diag->port.func.name = "diag";
+	diag->port.func.strings = diag_strings;
+	diag->port.func.bind = diag_bind;
+	diag->port.func.unbind = diag_unbind;
+	diag->port.func.set_alt = diag_set_alt;
+	diag->port.func.disable = diag_disable;
+#ifdef CONFIG_PM
+    diag->port.func.suspend = diag_suspend;
+    diag->port.func.resume = diag_resume;
+#endif
+
+	status = usb_add_function(c, &diag->port.func);
+	if (status)
+		kfree(diag);
+	return status;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_diag_acm.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_diag_acm.c
new file mode 100755
index 0000000..2e6e089
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_diag_acm.c
@@ -0,0 +1,869 @@
+/*
+ * f_diag_acm.c -- USB CDC serial (diag_acm) function driver
+ *
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 by David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ * Copyright (C) 2009 by Samsung Electronics
+ * Author: Michal Nazarewicz (mina86@mina86.com)
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/android_notify.h>
+#include <asm/unaligned.h>
+
+#include "u_diag.h"
+#include "gadget_chips.h"
+
+#ifndef CONFIG_SYSTEM_RECOVERY
+extern int zDrvNand_WriteBootflag( int flag );
+#endif
+/*
+ * This CDC ACM function support just wraps control functions and
+ * notifications around the generic serial-over-usb code.
+ *
+ * Because CDC ACM is standardized by the USB-IF, many host operating
+ * systems have drivers for it.  Accordingly, ACM is the preferred
+ * interop solution for serial-port type connections.  The control
+ * models are often not necessary, and in any case don't do much in
+ * this bare-bones implementation.
+ *
+ * Note that even MS-Windows has some support for ACM.  However, that
+ * support is somewhat broken because when you use ACM in a composite
+ * device, having multiple interfaces confuses the poor OS.  It doesn't
+ * seem to understand CDC Union descriptors.  The new "association"
+ * descriptors (roughly equivalent to CDC Unions) may sometimes help.
+ */
+
+struct f_diag_acm {
+	struct u_diag			port;
+	u8				ctrl_id, data_id;
+	u8				port_num;
+
+	u8				pending;
+
+	/* lock is mostly for pending and notify_req ... they get accessed
+	 * by callbacks both from tty (open/close/break) under its spinlock,
+	 * and notify_req.complete() which can't use that lock.
+	 */
+	spinlock_t			lock;
+
+	struct usb_ep			*notify;
+	struct usb_request		*notify_req;
+
+	struct usb_cdc_line_coding	port_line_coding;	/* 8-N-1 etc */
+
+	/* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */
+	u16				port_handshake_bits;
+#define ACM_CTRL_RTS	(1 << 1)	/* unused with full duplex */
+#define ACM_CTRL_DTR	(1 << 0)	/* host is ready for data r/w */
+
+	/* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */
+	u16				serial_state;
+#define ACM_CTRL_OVERRUN	(1 << 6)
+#define ACM_CTRL_PARITY		(1 << 5)
+#define ACM_CTRL_FRAMING	(1 << 4)
+#define ACM_CTRL_RI		(1 << 3)
+#define ACM_CTRL_BRK		(1 << 2)
+#define ACM_CTRL_DSR		(1 << 1)
+#define ACM_CTRL_DCD		(1 << 0)
+	int switch_falg; 		
+};
+
+static inline struct f_diag_acm *func_to_diag_acm(struct usb_function *f)
+{
+	return container_of(f, struct f_diag_acm, port.func);
+}
+
+static inline struct f_diag_acm *port_to_diag_acm(struct u_diag *p)
+{
+	return container_of(p, struct f_diag_acm, port);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* notification endpoint uses smallish and infrequent fixed-size messages */
+
+#define GS_LOG2_NOTIFY_INTERVAL		5	/* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET		10	/* notification + 2 bytes */
+
+/* interface and class descriptors: */
+
+static struct usb_interface_assoc_descriptor
+diag_iad_descriptor = {
+	.bLength =		sizeof diag_iad_descriptor,
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+
+	/* .bFirstInterface =	DYNAMIC, */
+	.bInterfaceCount = 	2,	// control + data
+	.bFunctionClass =	USB_CLASS_COMM,
+	.bFunctionSubClass =	USB_CDC_SUBCLASS_ACM,
+	.bFunctionProtocol =	USB_CDC_ACM_PROTO_AT_V25TER,
+	/* .iFunction =		DYNAMIC */
+};
+
+
+static struct usb_interface_descriptor diag_control_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ACM,
+	.bInterfaceProtocol =	USB_CDC_ACM_PROTO_AT_V25TER,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_interface_descriptor diag_data_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc diag_header_desc = {
+	.bLength =		sizeof(diag_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor
+diag_call_mgmt_descriptor = {
+	.bLength =		sizeof(diag_call_mgmt_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
+	.bmCapabilities =	0,
+	/* .bDataInterface = DYNAMIC */
+};
+
+static struct usb_cdc_acm_descriptor diag_descriptor = {
+	.bLength =		sizeof(diag_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
+	.bmCapabilities =	USB_CDC_CAP_LINE,
+};
+
+static struct usb_cdc_union_desc diag_union_desc = {
+	.bLength =		sizeof(diag_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor diag_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval =		1 << GS_LOG2_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor diag_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor diag_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *diag_fs_function[] = {
+	(struct usb_descriptor_header *) &diag_iad_descriptor,
+	(struct usb_descriptor_header *) &diag_control_interface_desc,
+	(struct usb_descriptor_header *) &diag_header_desc,
+	(struct usb_descriptor_header *) &diag_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &diag_descriptor,
+	(struct usb_descriptor_header *) &diag_union_desc,
+	(struct usb_descriptor_header *) &diag_fs_notify_desc,
+	(struct usb_descriptor_header *) &diag_data_interface_desc,
+	(struct usb_descriptor_header *) &diag_fs_in_desc,
+	(struct usb_descriptor_header *) &diag_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+static struct usb_endpoint_descriptor diag_hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval =		GS_LOG2_NOTIFY_INTERVAL+4,
+};
+
+static struct usb_endpoint_descriptor diag_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor diag_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *diag_hs_function[] = {
+	(struct usb_descriptor_header *) &diag_iad_descriptor,
+	(struct usb_descriptor_header *) &diag_control_interface_desc,
+	(struct usb_descriptor_header *) &diag_header_desc,
+	(struct usb_descriptor_header *) &diag_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &diag_descriptor,
+	(struct usb_descriptor_header *) &diag_union_desc,
+	(struct usb_descriptor_header *) &diag_hs_notify_desc,
+	(struct usb_descriptor_header *) &diag_data_interface_desc,
+	(struct usb_descriptor_header *) &diag_hs_in_desc,
+	(struct usb_descriptor_header *) &diag_hs_out_desc,
+	NULL,
+};
+
+static struct usb_endpoint_descriptor diag_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor diag_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor diag_ss_bulk_comp_desc = {
+	.bLength =              sizeof diag_ss_bulk_comp_desc,
+	.bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_descriptor_header *diag_ss_function[] = {
+	(struct usb_descriptor_header *) &diag_iad_descriptor,
+	(struct usb_descriptor_header *) &diag_control_interface_desc,
+	(struct usb_descriptor_header *) &diag_header_desc,
+	(struct usb_descriptor_header *) &diag_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &diag_descriptor,
+	(struct usb_descriptor_header *) &diag_union_desc,
+	(struct usb_descriptor_header *) &diag_hs_notify_desc,
+	(struct usb_descriptor_header *) &diag_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &diag_data_interface_desc,
+	(struct usb_descriptor_header *) &diag_ss_in_desc,
+	(struct usb_descriptor_header *) &diag_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &diag_ss_out_desc,
+	(struct usb_descriptor_header *) &diag_ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+#define DIAG_CTRL_IDX	0
+#define DIAG_DATA_IDX	1
+#define DIAG_IAD_IDX	2
+
+/* static strings, in UTF-8 */
+static struct usb_string diag_string_defs[] = {
+	[DIAG_CTRL_IDX].s = "CDC Abstract Control Model (ACM)",
+	[DIAG_DATA_IDX].s = "CDC ACM Data",
+	[DIAG_IAD_IDX ].s = "CDC Serial",
+	{  }, /* end of list */
+};
+
+static struct usb_gadget_strings diag_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		diag_string_defs,
+};
+
+static struct usb_gadget_strings *diag_strings[] = {
+	&diag_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* ACM control ... data handling is delegated to tty library code.
+ * The main task of this function is to activate and deactivate
+ * that code based on device state; track parameters like line
+ * speed, handshake state, and so on; and issue notifications.
+ */
+
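+/*
+ * SET_LINE_CODING completion: besides saving the coding, a requested
+ * rate of 4000000 baud is treated as a vendor trigger -- the boot flag
+ * is written (outside recovery builds) and a switch to user mode is
+ * signalled, at most once per session via switch_falg.
+ */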
+static void diag_complete_set_line_coding(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct f_diag_acm	*diag_acm = ep->driver_data;
+	struct usb_composite_dev *cdev = diag_acm->port.func.config->cdev;
+
+	if (req->status != 0) {
+		usb_printk( "diag_acm ttyGS%d completion, err %d\n",
+				diag_acm->port_num, req->status);
+		return;
+	}
+
+	/* normal completion */
+	if (req->actual != sizeof(diag_acm->port_line_coding)) {
+		usb_printk( "diag_acm ttyGS%d short resp, len %d\n",
+				diag_acm->port_num, req->actual);
+		usb_ep_set_halt(ep);
+	} else {
+		struct usb_cdc_line_coding	*value = req->buf;
+
+		/* REVISIT:  we currently just remember this data.
+		 * If we change that, (a) validate it first, then
+		 * (b) update whatever hardware needs updating,
+		 * (c) worry about locking.  This is information on
+		 * the order of 9600-8-N-1 ... most of which means
+		 * nothing unless we control a real RS232 line.
+		 */
+		u32 baud;
+
+		diag_acm->port_line_coding = *value;
+		baud = get_unaligned_le32(&value->dwDTERate);
+		printk("diag_acm_complete_set_line_coding, datarate:%u, char:%d, parity:%d, databit:%d\n",
+				baud, value->bCharFormat, value->bParityType, value->bDataBits);
+		if (baud == 4000000) {
+			if (diag_acm->switch_falg >= 1) {
+				return;
+			}
+			diag_acm->switch_falg = 1;
+			printk("close DL & switch to user mode\n");
+#ifndef CONFIG_SYSTEM_RECOVERY
+			zDrvNand_WriteBootflag(1);
+#endif
+			usb_notify_up(USB_SWITCH_USER, NULL);
+			/* usb_notify_up(USB_DEVICE_EXCEPT_RESET, NULL); */
+		}
+	}
+}
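+
+/*
+ * For reference only (a sketch, not code used by this driver): SET_LINE_CODING
+ * carries a 7-byte struct usb_cdc_line_coding.  A host asking for
+ * 115200-8-N-1 would send the little-endian equivalent of:
+ *
+ *	struct usb_cdc_line_coding coding = {
+ *		.dwDTERate	= cpu_to_le32(115200),
+ *		.bCharFormat	= USB_CDC_1_STOP_BITS,
+ *		.bParityType	= USB_CDC_NO_PARITY,
+ *		.bDataBits	= 8,
+ *	};
+ *
+ * The handler above treats a requested rate of 4000000 baud as a vendor
+ * signal to leave download mode, rather than as a real line speed.
+ */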
+
+static int diag_acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_diag_acm		*diag_acm = func_to_diag_acm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 *
+	 * Note CDC spec table 4 lists the ACM request profile.  It requires
+	 * encapsulated command support ... we don't handle any, and respond
+	 * to them by stalling.  Options include get/set/clear comm features
+	 * (not that useful) and SEND_BREAK.
+	 */
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* SET_LINE_CODING ... just read and save what the host sends */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_LINE_CODING:
+		if (w_length != sizeof(struct usb_cdc_line_coding)
+				|| w_index != diag_acm->ctrl_id)
+			goto invalid;
+
+		value = w_length;
+		cdev->gadget->ep0->driver_data = diag_acm;
+		req->complete = diag_complete_set_line_coding;
+		break;
+
+	/* GET_LINE_CODING ... return what host sent, or initial value */
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_GET_LINE_CODING:
+		if (w_index != diag_acm->ctrl_id)
+			goto invalid;
+
+		value = min_t(unsigned, w_length,
+				sizeof(struct usb_cdc_line_coding));
+		memcpy(req->buf, &diag_acm->port_line_coding, value);
+		break;
+
+	/* SET_CONTROL_LINE_STATE ... save what the host sent */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		if (w_index != diag_acm->ctrl_id) {
+			printk("---SET_CONTROL_LINE_STATE, w_index:%04x, w_value:%04x\n", w_index, w_value);
+			goto invalid;
+		}
+		value = 0;
+
+		/* FIXME we should not allow data to flow until the
+		 * host sets the ACM_CTRL_DTR bit; and when it clears
+		 * that bit, we should return to that no-flow state.
+		 */
+		diag_acm->port_handshake_bits = w_value;
+		break;
+
+	default:
+invalid:
+		usb_printk( "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		usb_printk( "diag_acm ttyGS%d req%02x.%02x v%04x i%04x l%d\n",
+			diag_acm->port_num, ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			usb_printk( "diag_acm response on ttyGS%d, err %d\n",
+					diag_acm->port_num, value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
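+
+/*
+ * Reading aid for the switch above, which keys on (bRequestType << 8) |
+ * bRequest.  Using the standard CDC ACM constants, the three handled
+ * requests compose to:
+ *
+ *	SET_LINE_CODING		(0x21 << 8) | 0x20  ->  0x2120
+ *	GET_LINE_CODING		(0xa1 << 8) | 0x21  ->  0xa121
+ *	SET_CONTROL_LINE_STATE	(0x21 << 8) | 0x22  ->  0x2122
+ */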
+
+static int diag_acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_diag_acm		*diag_acm = func_to_diag_acm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret = 0;
+	/* we know alt == 0, so this is an activation or a reset */
+
+	if (intf == diag_acm->ctrl_id) {
+		if (diag_acm->notify->driver_data) {
+			usb_printk( "reset diag_acm control interface %d\n", intf);
+			usb_ep_disable(diag_acm->notify);
+		} else {
+			usb_printk( "init diag_acm ctrl interface %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, diag_acm->notify))
+				return -EINVAL;
+		}
+		ret = usb_ep_enable(diag_acm->notify);
+		diag_acm->notify->driver_data = diag_acm;
+
+	} else if (intf == diag_acm->data_id) {
+		if (diag_acm->port.in->driver_data) {
+			usb_printk( "reset diag_acm ttyGS%d\n", diag_acm->port_num);
+			diag_disconnect(&diag_acm->port);
+		}
+		if (!diag_acm->port.in->desc || !diag_acm->port.out->desc) {
+			usb_printk("activate diag_acm ttyGS%d\n", diag_acm->port_num);
+			if (config_ep_by_speed(cdev->gadget, f,
+					       diag_acm->port.in) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       diag_acm->port.out)) {
+				diag_acm->port.in->desc = NULL;
+				diag_acm->port.out->desc = NULL;
+				return -EINVAL;
+			}
+		}
+		diag_connect(&diag_acm->port, diag_acm->port_num);
+
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
+static void diag_acm_disable(struct usb_function *f)
+{
+	struct f_diag_acm	*diag_acm = func_to_diag_acm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	usb_printk( "diag_acm ttyGS%d deactivated\n", diag_acm->port_num);
+	diag_disconnect(&diag_acm->port);
+	usb_ep_disable(diag_acm->notify);
+	diag_acm->switch_falg = 0;
+	diag_acm->notify->driver_data = NULL;
+}
+#ifdef CONFIG_PM
+unsigned int g_diag_acm_suspend_cnt = 0;
+unsigned int g_diag_acm_resume_cnt = 0;
+
+static void diag_acm_suspend(struct usb_function *f)
+{
+	struct f_diag_acm	*diag_acm = func_to_diag_acm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	g_diag_acm_suspend_cnt++;
+	usb_printk("%s, %u, suspendcnt:%d\n", __func__, __LINE__, g_diag_acm_suspend_cnt);
+	USBSTACK_DBG("%s, %u suspendcnt:%d", __func__, __LINE__, g_diag_acm_suspend_cnt);
+
+	diag_acm->port.suspend_state = 1;
+	diag_disconnect_ext(&diag_acm->port);
+	usb_ep_disable(diag_acm->port.in);
+}
+
+static void diag_acm_resume(struct usb_function *f)
+{
+	struct f_diag_acm	*diag_acm = func_to_diag_acm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	g_diag_acm_resume_cnt++;
+	usb_printk("%s, %u, resumecnt:%d\n", __func__, __LINE__, g_diag_acm_resume_cnt);
+	USBSTACK_DBG("%s, %u resumecnt:%d", __func__, __LINE__, g_diag_acm_resume_cnt);
+
+	usb_ep_resume_enable(diag_acm->port.in);
+	/* usb_ep_enable(diag_acm->port.out); */
+	diag_acm->port.suspend_state = 0;
+	diag_connect_ext(&diag_acm->port);
+}
+#endif
+/*-------------------------------------------------------------------------*/
+
+/**
+ * diag_acm_cdc_notify - issue CDC notification to host
+ * @diag_acm: wraps host to be notified
+ * @type: notification type
+ * @value: Refer to cdc specs, wValue field.
+ * @data: data to be sent
+ * @length: size of data
+ * Context: irqs blocked, diag_acm->lock held, diag_acm_notify_req non-null
+ *
+ * Returns zero on success or a negative errno.
+ *
+ * See section 6.3.5 of the CDC 1.1 specification for information
+ * about the only notification we issue:  SerialState change.
+ */
+static int diag_acm_cdc_notify(struct f_diag_acm *diag_acm, u8 type, u16 value,
+		void *data, unsigned length)
+{
+	struct usb_ep			*ep = diag_acm->notify;
+	struct usb_request		*req;
+	struct usb_cdc_notification	*notify;
+	const unsigned			len = sizeof(*notify) + length;
+	void				*buf;
+	int				status;
+
+	req = diag_acm->notify_req;
+	diag_acm->notify_req = NULL;
+	diag_acm->pending = false;
+
+	req->length = len;
+	notify = req->buf;
+	buf = notify + 1;
+
+	notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	notify->bNotificationType = type;
+	notify->wValue = cpu_to_le16(value);
+	notify->wIndex = cpu_to_le16(diag_acm->ctrl_id);
+	notify->wLength = cpu_to_le16(length);
+	memcpy(buf, data, length);
+
+	/* ep_queue() can complete immediately if it fills the fifo... */
+	spin_unlock(&diag_acm->lock);
+	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+	spin_lock(&diag_acm->lock);
+
+	if (status < 0) {
+		usb_printk("diag_acm ttyGS%d can't notify serial state, %d\n",
+				diag_acm->port_num, status);
+		diag_acm->notify_req = req;
+	}
+
+	return status;
+}
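+
+/*
+ * Wire format of the SerialState notification built above (per CDC 1.1,
+ * section 6.3.5, shown here only as a reading aid): an 8-byte
+ * usb_cdc_notification header followed by a 2-byte bitmap, sent on the
+ * interrupt IN endpoint:
+ *
+ *	bmRequestType		0xa1 (IN | CLASS | INTERFACE)
+ *	bNotificationType	0x20 (USB_CDC_NOTIFY_SERIAL_STATE)
+ *	wValue			0
+ *	wIndex			control interface number
+ *	wLength			2
+ *	data			ACM_CTRL_* bits (DCD, DSR, BRK, RI, ...)
+ */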
+
+static int diag_acm_notify_serial_state(struct f_diag_acm *diag_acm)
+{
+	struct usb_composite_dev *cdev = diag_acm->port.func.config->cdev;
+	int			status;
+
+	spin_lock(&diag_acm->lock);
+	if (diag_acm->notify_req) {
+		usb_printk("diag_acm ttyGS%d serial state %04x\n",
+				diag_acm->port_num, diag_acm->serial_state);
+		status = diag_acm_cdc_notify(diag_acm, USB_CDC_NOTIFY_SERIAL_STATE,
+				0, &diag_acm->serial_state, sizeof(diag_acm->serial_state));
+	} else {
+		diag_acm->pending = true;
+		status = 0;
+	}
+	spin_unlock(&diag_acm->lock);
+	return status;
+}
+
+static void diag_acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_diag_acm		*diag_acm = req->context;
+	u8			doit = false;
+
+	/* on this call path we do NOT hold the port spinlock,
+	 * which is why ACM needs its own spinlock
+	 */
+	spin_lock(&diag_acm->lock);
+	if (req->status != -ESHUTDOWN)
+		doit = diag_acm->pending;
+	diag_acm->notify_req = req;
+	spin_unlock(&diag_acm->lock);
+
+	if (doit)
+		diag_acm_notify_serial_state(diag_acm);
+}
+
+/* connect == the TTY link is open */
+
+static void diag_acm_connect(struct u_diag *port)
+{
+	struct f_diag_acm		*diag_acm = port_to_diag_acm(port);
+
+	diag_acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+	diag_acm_notify_serial_state(diag_acm);
+}
+
+static void diag_acm_disconnect(struct u_diag *port)
+{
+	struct f_diag_acm		*diag_acm = port_to_diag_acm(port);
+
+	diag_acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+	diag_acm_notify_serial_state(diag_acm);
+}
+
+static int diag_acm_send_break(struct u_diag *port, int duration)
+{
+	struct f_diag_acm		*diag_acm = port_to_diag_acm(port);
+	u16			state;
+
+	state = diag_acm->serial_state;
+	state &= ~ACM_CTRL_BRK;
+	if (duration)
+		state |= ACM_CTRL_BRK;
+
+	diag_acm->serial_state = state;
+	return diag_acm_notify_serial_state(diag_acm);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* diag_acm function driver setup/binding */
+static int
+diag_acm_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_diag_acm		*diag_acm = func_to_diag_acm(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	diag_acm->ctrl_id = status;
+	diag_iad_descriptor.bFirstInterface = status;
+
+	diag_control_interface_desc.bInterfaceNumber = status;
+	diag_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	diag_acm->data_id = status;
+
+	diag_data_interface_desc.bInterfaceNumber = status;
+	diag_union_desc.bSlaveInterface0 = status;
+	diag_call_mgmt_descriptor.bDataInterface = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &diag_fs_in_desc);
+	if (!ep)
+		goto fail;
+	diag_acm->port.in = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &diag_fs_out_desc);
+	if (!ep)
+		goto fail;
+	diag_acm->port.out = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &diag_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	diag_acm->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* allocate notification */
+	diag_acm->notify_req = gdiag_alloc_req(ep,
+			sizeof(struct usb_cdc_notification) + 2,
+			GFP_KERNEL);
+	if (!diag_acm->notify_req)
+		goto fail;
+
+	diag_acm->notify_req->complete = diag_acm_cdc_notify_complete;
+	diag_acm->notify_req->context = diag_acm;
+
+	/* copy descriptors */
+	f->descriptors = usb_copy_descriptors(diag_fs_function);
+	if (!f->descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		diag_hs_in_desc.bEndpointAddress =
+				diag_fs_in_desc.bEndpointAddress;
+		diag_hs_out_desc.bEndpointAddress =
+				diag_fs_out_desc.bEndpointAddress;
+		diag_hs_notify_desc.bEndpointAddress =
+				diag_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors */
+		f->hs_descriptors = usb_copy_descriptors(diag_hs_function);
+	}
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		diag_ss_in_desc.bEndpointAddress =
+			diag_fs_in_desc.bEndpointAddress;
+		diag_ss_out_desc.bEndpointAddress =
+			diag_fs_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(diag_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	usb_printk( "diag_acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			diag_acm->port_num,
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			diag_acm->port.in->name, diag_acm->port.out->name,
+			diag_acm->notify->name);
+	return 0;
+
+fail:
+	if (diag_acm->notify_req)
+		gdiag_free_req(diag_acm->notify, diag_acm->notify_req);
+
+	/* we might as well release our claims on endpoints */
+	if (diag_acm->notify)
+		diag_acm->notify->driver_data = NULL;
+	if (diag_acm->port.out)
+		diag_acm->port.out->driver_data = NULL;
+	if (diag_acm->port.in)
+		diag_acm->port.in->driver_data = NULL;
+
+	usb_printk( "%s/%p: can't bind, err %d\n", f->name, f, status);
+
+	return status;
+}
+
+static void
+diag_acm_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_diag_acm		*diag_acm = func_to_diag_acm(f);
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	usb_free_descriptors(f->descriptors);
+	gdiag_free_req(diag_acm->notify, diag_acm->notify_req);
+	kfree(diag_acm);
+}
+
+/* Some controllers can't support CDC ACM ... */
+static inline bool diag_can_support_cdc(struct usb_configuration *c)
+{
+	/* everything else is *probably* fine ... */
+	return true;
+}
+
+/**
+ * diag_acm_bind_config - add a CDC ACM function to a configuration
+ * @c: the configuration to support the CDC ACM instance
+ * @port_num: /dev/ttyGS* port this interface will use
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gserial_setup() with enough ports to
+ * handle all the ones it binds.  Caller is also responsible
+ * for calling @gserial_cleanup() before module unload.
+ */
+int diag_acm_bind_config(struct usb_configuration *c, u8 port_num)
+{
+	struct f_diag_acm	*diag_acm;
+	int		status;
+
+	if (!diag_can_support_cdc(c))
+		return -EINVAL;
+
+	/* REVISIT might want instance-specific strings to help
+	 * distinguish instances ...
+	 */
+
+	/* maybe allocate device-global string IDs, and patch descriptors */
+	if (diag_string_defs[DIAG_CTRL_IDX].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		diag_string_defs[DIAG_CTRL_IDX].id = status;
+
+		diag_control_interface_desc.iInterface = status;
+
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		diag_string_defs[DIAG_DATA_IDX].id = status;
+
+		diag_data_interface_desc.iInterface = status;
+
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		diag_string_defs[DIAG_IAD_IDX].id = status;
+
+		diag_iad_descriptor.iFunction = status;
+	}
+
+	/* allocate and initialize one new instance */
+	diag_acm = kzalloc(sizeof *diag_acm, GFP_KERNEL);
+	if (!diag_acm)
+		return -ENOMEM;
+
+	spin_lock_init(&diag_acm->lock);
+
+	diag_acm->port_num = port_num;
+
+	diag_acm->port.connect = diag_acm_connect;
+	diag_acm->port.disconnect = diag_acm_disconnect;
+	diag_acm->port.send_break = diag_acm_send_break;
+
+	diag_acm->port.func.name = "diag_acm";
+	diag_acm->port.func.strings = diag_strings;
+	/* descriptors are per-instance copies */
+	diag_acm->port.func.bind = diag_acm_bind;
+	diag_acm->port.func.unbind = diag_acm_unbind;
+	diag_acm->port.func.set_alt = diag_acm_set_alt;
+	diag_acm->port.func.setup = diag_acm_setup;
+	diag_acm->port.func.disable = diag_acm_disable;
+#if 0 //#ifdef CONFIG_PM
+	diag_acm->port.func.suspend = diag_acm_suspend;
+	diag_acm->port.func.resume = diag_acm_resume;
+#endif
+	status = usb_add_function(c, &diag_acm->port.func);
+	if (status)
+		kfree(diag_acm);
+	return status;
+}
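+
+/*
+ * Hypothetical caller sketch (the real gadget glue lives elsewhere in this
+ * tree; names follow the gserial_* helpers mentioned in the kerneldoc above
+ * and are assumptions, not part of this file):
+ *
+ *	status = gserial_setup(c->cdev->gadget, 1);	// one ttyGS port
+ *	if (status == 0)
+ *		status = diag_acm_bind_config(c, 0);	// bind to port 0
+ */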
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_ecm.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_ecm.c
new file mode 100755
index 0000000..bd0914d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_ecm.c
@@ -0,0 +1,1096 @@
+/*
+ * f_ecm.c -- USB CDC Ethernet (ECM) link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include "u_ether.h"
+
+
+/*
+ * This function is a "CDC Ethernet Networking Control Model" (CDC ECM)
+ * Ethernet link.  The data transfer model is simple (packets sent and
+ * received over bulk endpoints using normal short packet termination),
+ * and the control model exposes various data and optional notifications.
+ *
+ * ECM is well standardized and (except for Microsoft) supported by most
+ * operating systems with USB host support.  It's the preferred interop
+ * solution for Ethernet over USB, at least for firmware based solutions.
+ * (Hardware solutions tend to be more minimalist.)  A newer and simpler
+ * "Ethernet Emulation Model" (CDC EEM) hasn't yet caught on.
+ *
+ * Note that ECM requires the use of "alternate settings" for its data
+ * interface.  This means that the set_alt() method has real work to do,
+ * and also means that a get_alt() method is required.
+ */
+
+
+enum ecm_notify_state {
+	ECM_NOTIFY_NONE,		/* don't notify */
+	ECM_NOTIFY_CONNECT,		/* issue CONNECT next */
+	ECM_NOTIFY_SPEED,		/* issue SPEED_CHANGE next */
+};
+
+struct f_ecm {
+	struct gether			port;
+	u8				ctrl_id, data_id;
+
+	char				ethaddr[14];
+
+	struct usb_ep			*notify;
+	struct usb_request		*notify_req;
+	u8				notify_state;
+	bool				is_open;
+
+	/* FIXME is_open needs some irq-ish locking
+	 * ... possibly the same as port.ioport
+	 */
+
+	u8	num;	
+	struct delayed_work work;	
+	int work_init_state;
+	int work_schedule_state;
+};
+
+extern unsigned int force_net;
+
+unsigned int ecm_setup_work_time = 0;
+static inline struct f_ecm *func_to_ecm(struct usb_function *f)
+{
+	return container_of(f, struct f_ecm, port.func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static inline unsigned ecm_bitrate(struct usb_gadget *g)
+{
+#ifndef USB_ETHER_REPROT_MAX_MODEM_SPEED
+	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+		return 13 * 1024 * 8 * 1000 * 8;
+	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;
+	else
+		return 19 * 64 * 1 * 1000 * 8;
+#else
+    return LTE_CAT4_SPEED;
+#endif
+}
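+
+/*
+ * For the record, the theoretical peaks computed above work out to:
+ *
+ *	super speed:	13 * 1024 * 8 * 1000 * 8 = 851,968,000 bps (~852 Mbps)
+ *	high speed:	13 * 512  * 8 * 1000 * 8 = 425,984,000 bps (~426 Mbps)
+ *	full speed:	19 * 64   * 1 * 1000 * 8 =   9,728,000 bps (~9.7 Mbps)
+ *
+ * i.e. (packets per (micro)frame) * (packet size) * ((micro)frames per ms)
+ * * (ms per second) * (bits per byte).
+ */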
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Include the status endpoint if we can, even though it's optional.
+ *
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ *
+ * Some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
+ * if they ignore the connect/disconnect notifications that real aether
+ * can provide.  More advanced cdc configurations might want to support
+ * encapsulated commands (vendor-specific, using control-OUT).
+ */
+
+#define LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */
+#define ECM_STATUS_BYTECOUNT		16	/* 8 byte header + data */
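+
+/*
+ * A note on the interval encoding (a sketch of the USB rules, not new
+ * policy): at full speed bInterval is in milliseconds, so "1 << 5" below
+ * gives 32 ms; at high/super speed bInterval is an exponent, giving a period
+ * of 2^(bInterval-1) microframes, so LOG2_STATUS_INTERVAL_MSEC + 4 = 9
+ * yields 2^8 * 125 us = 32 ms as well.
+ */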
+
+
+/* interface descriptor: */
+
+static struct usb_interface_assoc_descriptor
+ecm_iad_descriptor = {
+	.bLength =		sizeof ecm_iad_descriptor,
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+
+	/* .bFirstInterface =	DYNAMIC, */
+	.bInterfaceCount =	2,	/* control + data */
+	.bFunctionClass =	USB_CLASS_COMM,
+	.bFunctionSubClass =	USB_CDC_SUBCLASS_ETHERNET,
+	.bFunctionProtocol =	USB_CDC_PROTO_NONE,
+	/* .iFunction =		DYNAMIC */
+};
+
+
+static struct usb_interface_descriptor ecm_control_intf = {
+	.bLength =		sizeof ecm_control_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	/* status endpoint is optional; this could be patched later */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol =	USB_CDC_PROTO_NONE,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc ecm_header_desc = {
+	.bLength =		sizeof ecm_header_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc ecm_union_desc = {
+	.bLength =		sizeof(ecm_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+static struct usb_cdc_ether_desc ecm_desc = {
+	.bLength =		sizeof ecm_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,
+
+	/* this descriptor actually adds value, surprise! */
+	/* .iMACAddress = DYNAMIC */
+	.bmEthernetStatistics =	cpu_to_le32(0), /* no statistics */
+	.wMaxSegmentSize =	cpu_to_le16(ETH_FRAME_LEN),
+	.wNumberMCFilters =	cpu_to_le16(0),
+	.bNumberPowerFilters =	0,
+};
+
+/* the default data interface has no endpoints ... */
+
+static struct usb_interface_descriptor ecm_data_nop_intf = {
+	.bLength =		sizeof ecm_data_nop_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor ecm_data_intf = {
+	.bLength =		sizeof ecm_data_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor fs_ecm_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(ECM_STATUS_BYTECOUNT),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor fs_ecm_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_ecm_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *ecm_fs_function[] = {
+	/* CDC ECM control descriptors */
+	(struct usb_descriptor_header *) &ecm_iad_descriptor,
+	(struct usb_descriptor_header *) &ecm_control_intf,
+	(struct usb_descriptor_header *) &ecm_header_desc,
+	(struct usb_descriptor_header *) &ecm_union_desc,
+	(struct usb_descriptor_header *) &ecm_desc,
+
+	/* NOTE: status endpoint might need to be removed */
+	(struct usb_descriptor_header *) &fs_ecm_notify_desc,
+
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ecm_data_nop_intf,
+	(struct usb_descriptor_header *) &ecm_data_intf,
+	(struct usb_descriptor_header *) &fs_ecm_in_desc,
+	(struct usb_descriptor_header *) &fs_ecm_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor hs_ecm_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(ECM_STATUS_BYTECOUNT),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_endpoint_descriptor hs_ecm_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_ecm_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *ecm_hs_function[] = {
+	/* CDC ECM control descriptors */
+	(struct usb_descriptor_header *) &ecm_iad_descriptor,
+	(struct usb_descriptor_header *) &ecm_control_intf,
+	(struct usb_descriptor_header *) &ecm_header_desc,
+	(struct usb_descriptor_header *) &ecm_union_desc,
+	(struct usb_descriptor_header *) &ecm_desc,
+
+	/* NOTE: status endpoint might need to be removed */
+	(struct usb_descriptor_header *) &hs_ecm_notify_desc,
+
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ecm_data_nop_intf,
+	(struct usb_descriptor_header *) &ecm_data_intf,
+	(struct usb_descriptor_header *) &hs_ecm_in_desc,
+	(struct usb_descriptor_header *) &hs_ecm_out_desc,
+	NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_ecm_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(ECM_STATUS_BYTECOUNT),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ss_ecm_intr_comp_desc = {
+	.bLength =		sizeof ss_ecm_intr_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(ECM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ss_ecm_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor ss_ecm_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_ecm_bulk_comp_desc = {
+	.bLength =		sizeof ss_ecm_bulk_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *ecm_ss_function[] = {
+	/* CDC ECM control descriptors */
+	(struct usb_descriptor_header *) &ecm_iad_descriptor,
+	(struct usb_descriptor_header *) &ecm_control_intf,
+	(struct usb_descriptor_header *) &ecm_header_desc,
+	(struct usb_descriptor_header *) &ecm_union_desc,
+	(struct usb_descriptor_header *) &ecm_desc,
+
+	/* NOTE: status endpoint might need to be removed */
+	(struct usb_descriptor_header *) &ss_ecm_notify_desc,
+	(struct usb_descriptor_header *) &ss_ecm_intr_comp_desc,
+
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ecm_data_nop_intf,
+	(struct usb_descriptor_header *) &ecm_data_intf,
+	(struct usb_descriptor_header *) &ss_ecm_in_desc,
+	(struct usb_descriptor_header *) &ss_ecm_bulk_comp_desc,
+	(struct usb_descriptor_header *) &ss_ecm_out_desc,
+	(struct usb_descriptor_header *) &ss_ecm_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string ecm_string_defs[] = {
+	[0].s = "CDC Ethernet Control Model (ECM)",
+	[1].s = "CDC Ethernet Data",
+	[2].s = "CDC ECM",
+	[3].s = NULL /* DYNAMIC */,
+	[4].s = NULL /* DYNAMIC */,
+	[5].s = NULL /* DYNAMIC */,
+	[6].s = NULL /* DYNAMIC */,
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings ecm_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		ecm_string_defs,
+};
+
+static struct usb_gadget_strings *ecm_strings[] = {
+	&ecm_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+extern void dwc_otg_wakelock(int lock_flag,int phase);
+extern unsigned int ecm_function_work_time;
+int ecm_work_run_cnt = 0;
+static void ecm_setup_work(struct work_struct *data)
+{
+	struct f_ecm	*ecm = container_of(to_delayed_work(data), struct f_ecm, work);
+
+	USBSTACK_DBG("ecm setup work, run_cnt:%d", ecm_work_run_cnt);
+	printk("ecm setup work, run_cnt:%d\n", ecm_work_run_cnt);
+	//if(ecm_function_work_time == 0){
+	ecm->work_schedule_state = 0;
+	ecm_work_run_cnt++;
+	//ecm_setup_work_time = 0;
+	dwc_otg_wakelock(1,0);
+	usb_notify_up(USB_DEVICE_PLUGOUT, NULL);
+	usb_notify_up(USB_DEVICE_PLUGIN, NULL);
+	dwc_otg_wakelock(0,0);
+	//}
+}
+
+static void ecm_do_notify(struct f_ecm *ecm)
+{
+	struct usb_request		*req = ecm->notify_req;
+	struct usb_cdc_notification	*event;
+	struct usb_composite_dev	*cdev = ecm->port.func.config->cdev;
+	__le32				*data;
+	int				status;
+
+	/* notification already in flight? */
+	if (!req)
+		return;
+
+	event = req->buf;
+	switch (ecm->notify_state) {
+	case ECM_NOTIFY_NONE:
+		return;
+
+	case ECM_NOTIFY_CONNECT:
+		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+		if (ecm->is_open)
+			event->wValue = cpu_to_le16(1);
+		else
+			event->wValue = cpu_to_le16(0);
+
+		event->wIndex = cpu_to_le16(ecm->ctrl_id);
+		event->wLength = 0;
+		req->length = sizeof *event;
+
+		DBG(cdev, "notify connect %s\n",
+				ecm->is_open ? "true" : "false");
+		ecm->notify_state = ECM_NOTIFY_SPEED;
+		break;
+
+	case ECM_NOTIFY_SPEED:
+		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+		event->wValue = cpu_to_le16(0);
+		event->wLength = cpu_to_le16(8);
+		req->length = ECM_STATUS_BYTECOUNT;
+
+		/* SPEED_CHANGE data is up/down speeds in bits/sec */
+		data = req->buf + sizeof *event;
+		data[0] = cpu_to_le32(ecm_bitrate(cdev->gadget));
+		data[1] = data[0];
+
+		DBG(cdev, "notify speed %d\n", ecm_bitrate(cdev->gadget));
+		ecm->notify_state = ECM_NOTIFY_NONE;
+		break;
+	}
+	event->bmRequestType = 0xA1;
+	event->wIndex = cpu_to_le16(ecm->ctrl_id);
+
+	ecm->notify_req = NULL;
+	status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
+	if (status < 0) {
+		ecm->notify_req = req;
+		DBG(cdev, "notify --> %d\n", status);
+	}
+}
+
+static void ecm_notify(struct f_ecm *ecm)
+{
+	/* NOTE on most versions of Linux, host side cdc-ethernet
+	 * won't listen for notifications until its netdevice opens.
+	 * The first notification then sits in the FIFO for a long
+	 * time, and the second one is queued.
+	 */
+	ecm->notify_state = ECM_NOTIFY_CONNECT;
+	ecm_do_notify(ecm);
+}
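+
+/*
+ * The net effect of ecm_notify()/ecm_do_notify() is a two-message sequence
+ * on the interrupt endpoint (illustration only):
+ *
+ *	1) NETWORK_CONNECTION (0x00), wValue = 1 or 0, no data
+ *	2) SPEED_CHANGE (0x2a), 8 bytes of data: downlink/uplink bitrate
+ *
+ * the second one being queued from ecm_notify_complete() once the first
+ * transfer completes.
+ */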
+
+static void ecm_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_ecm			*ecm = req->context;
+	struct usb_composite_dev	*cdev = ecm->port.func.config->cdev;
+	struct usb_cdc_notification	*event = req->buf;
+
+	switch (req->status) {
+	case 0:
+		/* no fault */
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		ecm->notify_state = ECM_NOTIFY_NONE;
+		break;
+	default:
+		DBG(cdev, "event %02x --> %d\n",
+			event->bNotificationType, req->status);
+		break;
+	}
+	ecm->notify_req = req;
+	ecm_do_notify(ecm);
+}
+
+static int ecm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_ecm		*ecm = func_to_ecm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SET_ETHERNET_PACKET_FILTER:
+		/* see 6.2.30: no data, wIndex = interface,
+		 * wValue = packet filter bitmap
+		 */
+
+		if (force_net == 0) {
+			if (ecm->work_schedule_state == 1) {
+				cancel_delayed_work_sync(&ecm->work);
+				ecm->work_schedule_state = 0;
+				ecm_setup_work_time = 0;
+				printk("ecm_setup cancel ecm_work\n");
+			}
+		}
+		if (w_length != 0 || w_index != ecm->ctrl_id)
+			goto invalid;
+		DBG(cdev, "packet filter %02x\n", w_value);
+		/* REVISIT locking of cdc_filter.  This assumes the UDC
+		 * driver won't have a concurrent packet TX irq running on
+		 * another CPU; or that if it does, this write is atomic...
+		 */
+		ecm->port.cdc_filter = w_value;
+		value = 0;
+		break;
+
+	/* and optionally:
+	 * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
+	 * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
+	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
+	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_STATISTIC:
+	 */
+
+	default:
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "ecm req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "ecm req %02x.%02x response err %d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
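+
+/*
+ * Reading aid for the request handled above: SET_ETHERNET_PACKET_FILTER is
+ * bRequestType 0x21, bRequest 0x43, so the switch key is 0x2143, and wValue
+ * is a bitmap of USB_CDC_PACKET_TYPE_* flags, e.g.
+ *
+ *	USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST
+ *
+ * for a host that wants only unicast and broadcast frames.
+ */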
+
+
+static int ecm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_ecm		*ecm = func_to_ecm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret = 0;
+	/* Control interface has only altsetting 0 */
+	if (intf == ecm->ctrl_id) {
+		if (alt != 0)
+			goto fail;
+
+		if (ecm->notify->driver_data) {
+			VDBG(cdev, "reset ecm control %d\n", intf);
+			usb_ep_disable(ecm->notify);
+		}
+		if (!(ecm->notify->desc)) {
+			VDBG(cdev, "init ecm ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, ecm->notify))
+				goto fail;
+		}
+		ret = usb_ep_enable(ecm->notify);
+		ecm->notify->driver_data = ecm;
+
+	/* Data interface has two altsettings, 0 and 1 */
+	} else if (intf == ecm->data_id) {
+		if (alt > 1)
+			goto fail;
+
+		if (ecm->port.in_ep->driver_data) {
+			DBG(cdev, "reset ecm\n");
+			gether_disconnect(&ecm->port);
+		}
+
+		if (!ecm->port.in_ep->desc ||
+		    !ecm->port.out_ep->desc) {
+			DBG(cdev, "init ecm\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					       ecm->port.in_ep) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       ecm->port.out_ep)) {
+				ecm->port.in_ep->desc = NULL;
+				ecm->port.out_ep->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* CDC Ethernet only sends data in non-default altsettings.
+		 * Changing altsettings resets filters, statistics, etc.
+		 */
+		if (alt == 1) {
+			struct net_device	*net;
+
+			/* Enable zlps by default for ECM conformance;
+			 * override for musb_hdrc (avoids txdma ovhead).
+			 */
+			ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget)
+				);
+			ecm->port.cdc_filter = DEFAULT_FILTER;
+			DBG(cdev, "activate ecm\n");
+			net = gether_connect_num(&ecm->port, ecm->num);
+			if (IS_ERR(net))
+				return PTR_ERR(net);
+		}
+
+		/* NOTE this can be a minor disagreement with the ECM spec,
+		 * which says speed notifications will "always" follow
+		 * connection notifications.  But we allow one connect to
+		 * follow another (if the first is in flight), and instead
+		 * just guarantee that a speed notification is always sent.
+		 */
+		ecm_notify(ecm);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+/* Because the data interface supports multiple altsettings,
+ * this ECM function *MUST* implement a get_alt() method.
+ */
+static int ecm_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct f_ecm		*ecm = func_to_ecm(f);
+
+	if (intf == ecm->ctrl_id)
+		return 0;
+	return ecm->port.in_ep->driver_data ? 1 : 0;
+}
+
+static void ecm_disable(struct usb_function *f)
+{
+	struct f_ecm		*ecm = func_to_ecm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	DBG(cdev, "ecm deactivated\n");
+
+	if (ecm->port.in_ep->driver_data)
+		gether_disconnect(&ecm->port);
+
+	if (ecm->notify->driver_data) {
+		usb_ep_disable(ecm->notify);
+		ecm->notify->driver_data = NULL;
+		ecm->notify->desc = NULL;
+	}
+}
+
+#ifdef CONFIG_PM
+unsigned int g_ecm_suspend_cnt = 0;
+unsigned int g_ecm_resume_cnt = 0;
+static void ecm_suspend(struct usb_function *f)
+{
+	struct f_ecm		*ecm = func_to_ecm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	g_ecm_suspend_cnt++;
+	usb_printk("%s, %u, suspendcnt:%d\n", __func__, __LINE__, g_ecm_suspend_cnt);
+	USBSTACK_DBG("%s, %u suspendcnt:%d", __func__, __LINE__, g_ecm_suspend_cnt);
+
+	ecm->port.suspend_state = 1;
+	/* gether_disconnect(&ecm->port); */
+	usb_ep_disable(ecm->port.in_ep);
+	/* usb_ep_disable(ecm->port.out_ep); */
+	usb_ep_disable(ecm->notify);
+	gether_uevent_eth_ecm(&ecm->port, 0);
+}
+
+static void ecm_resume(struct usb_function *f)
+{
+	struct f_ecm		*ecm = func_to_ecm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	g_ecm_resume_cnt++;
+	usb_printk("%s, %u, resumecnt:%d\n", __func__, __LINE__, g_ecm_resume_cnt);
+	USBSTACK_DBG("%s, %u resumecnt:%d", __func__, __LINE__, g_ecm_resume_cnt);
+
+	/* gether_connect(&ecm->port); */
+	/* usb_ep_enable(ecm->notify); */
+	/* usb_ep_enable(ecm->port.in_ep); */
+	usb_ep_resume_enable(ecm->notify);
+	usb_ep_resume_enable(ecm->port.in_ep);
+	/* usb_ep_enable(ecm->port.out_ep); */
+	ecm->port.suspend_state = 0;
+	gether_uevent_eth_ecm(&ecm->port, 1);
+}
+#endif
+
+static void ecm_resume_notify(struct usb_function *f)
+{
+	struct f_ecm		*ecm = func_to_ecm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	printk("%s, %u\n", __func__, __LINE__);
+	USBSTACK_DBG("%s, %u", __func__, __LINE__);
+
+	if (ecm->is_open) {
+		printk("ecm_resume notify again\n");
+		ecm_notify(ecm);
+	}
+}
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Callbacks let us notify the host about connect/disconnect when the
+ * net device is opened or closed.
+ *
+ * For testing, note that link states on this side include both opened
+ * and closed variants of:
+ *
+ *   - disconnected/unconfigured
+ *   - configured but inactive (data alt 0)
+ *   - configured and active (data alt 1)
+ *
+ * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
+ * SET_INTERFACE (altsetting).  Remember also that "configured" doesn't
+ * imply the host is actually polling the notification endpoint, and
+ * likewise that "active" doesn't imply it's actually using the data
+ * endpoints for traffic.
+ */
+
+static void ecm_open(struct gether *geth)
+{
+	struct f_ecm		*ecm = func_to_ecm(&geth->func);
+
+	DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+	if (force_net == 0) {
+		if (ecm->work_schedule_state == 1) {
+			cancel_delayed_work_sync(&ecm->work);
+			ecm->work_schedule_state = 0;
+			ecm_setup_work_time = 0;
+			ecm_work_run_cnt = 0;
+			usb_printk("ecm_open cancel ecm setup work\n");
+		}
+	}
+
+	ecm->is_open = true;
+	ecm_notify(ecm);
+}
+
+static void ecm_close(struct gether *geth)
+{
+	struct f_ecm		*ecm = func_to_ecm(&geth->func);
+
+	DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+
+	ecm->is_open = false;
+	ecm_notify(ecm);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int
+ecm_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_ecm		*ecm = func_to_ecm(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	ecm_desc.iMACAddress = ecm_string_defs[3+(ecm->num)].id;
+	
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ecm->ctrl_id = status;
+	ecm_iad_descriptor.bFirstInterface = status;
+
+	ecm_control_intf.bInterfaceNumber = status;
+	ecm_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ecm->data_id = status;
+
+	ecm_data_nop_intf.bInterfaceNumber = status;
+	ecm_data_intf.bInterfaceNumber = status;
+	ecm_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_in_desc);
+	if (!ep)
+		goto fail;
+	ecm->port.in_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_out_desc);
+	if (!ep)
+		goto fail;
+	ecm->port.out_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* NOTE:  a status/notification endpoint is *OPTIONAL* but we
+	 * don't treat it that way.  It's simpler, and some newer CDC
+	 * profiles (wireless handsets) no longer treat it as optional.
+	 */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_notify_desc);
+	if (!ep)
+		goto fail;
+	ecm->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!ecm->notify_req)
+		goto fail;
+	ecm->notify_req->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!ecm->notify_req->buf)
+		goto fail;
+	ecm->notify_req->context = ecm;
+	ecm->notify_req->complete = ecm_notify_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(ecm_fs_function);
+	if (!f->descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hs_ecm_in_desc.bEndpointAddress =
+				fs_ecm_in_desc.bEndpointAddress;
+		hs_ecm_out_desc.bEndpointAddress =
+				fs_ecm_out_desc.bEndpointAddress;
+		hs_ecm_notify_desc.bEndpointAddress =
+				fs_ecm_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(ecm_hs_function);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		ss_ecm_in_desc.bEndpointAddress =
+				fs_ecm_in_desc.bEndpointAddress;
+		ss_ecm_out_desc.bEndpointAddress =
+				fs_ecm_out_desc.bEndpointAddress;
+		ss_ecm_notify_desc.bEndpointAddress =
+				fs_ecm_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(ecm_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	/* NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	ecm->port.open = ecm_open;
+	ecm->port.close = ecm_close;
+
+	DBG(cdev, "CDC Ethernet: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			ecm->port.in_ep->name, ecm->port.out_ep->name,
+			ecm->notify->name);
+	return 0;
+
+fail:
+	if (f->descriptors)
+		usb_free_descriptors(f->descriptors);
+	if (f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+
+	if (ecm->notify_req) {
+		kfree(ecm->notify_req->buf);
+		usb_ep_free_request(ecm->notify, ecm->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints */
+	if (ecm->notify)
+		ecm->notify->driver_data = NULL;
+	if (ecm->port.out_ep)
+		ecm->port.out_ep->driver_data = NULL;
+	if (ecm->port.in_ep)
+		ecm->port.in_ep->driver_data = NULL;
+
+	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+static void
+ecm_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_ecm		*ecm = func_to_ecm(f);
+
+	printk( "ecm unbind\n");
+
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+
+	kfree(ecm->notify_req->buf);
+	usb_ep_free_request(ecm->notify, ecm->notify_req);
+
+	ecm_string_defs[3].s = NULL;
+	ecm_string_defs[4].s = NULL;
+	ecm_string_defs[5].s = NULL;
+	ecm_string_defs[6].s = NULL;
+
+	if (ecm->work_schedule_state == 1) {
+		usb_printk("ecm_unbind, cancel delayed work\n");
+		cancel_delayed_work_sync(&ecm->work);
+		ecm->work_schedule_state = 0;
+		ecm_setup_work_time = 0;
+		ecm_work_run_cnt = 0;
+	}
+	kfree(ecm);
+}
+
+/**
+ * ecm_bind_config - add CDC Ethernet network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ *	side of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_setup().  Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int
+ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+{
+	return ecm_bind_config_num(c, ethaddr, 0);
+}
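+
+/*
+ * Hypothetical caller sketch (assumes the u_ether helpers this file builds
+ * on; the real configuration code lives elsewhere in this tree):
+ *
+ *	static u8 host_mac[ETH_ALEN];
+ *
+ *	status = gether_setup(c->cdev->gadget, host_mac);
+ *	if (status == 0)
+ *		status = ecm_bind_config(c, host_mac);
+ */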
+
+/*--------------------------- for multi-ecm --------------------------------*/
+
+int
+ecm_bind_config_num(struct usb_configuration *c, u8 ethaddr[ETH_ALEN], u8 num)
+{
+	struct f_ecm	*ecm;
+	int		status;
+
+	if (!can_support_ecm(c->cdev->gadget) || !ethaddr)
+		return -EINVAL;
+	/* index 3+num must stay within ecm_string_defs[], short of the terminator */
+	if ((num + 3) >= ARRAY_SIZE(ecm_string_defs) - 1)
+		return -EINVAL;
+	/* maybe allocate device-global string IDs */
+	if (ecm_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_string_defs[0].id = status;
+		ecm_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_string_defs[1].id = status;
+		ecm_data_intf.iInterface = status;
+
+
+		/* IAD label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_string_defs[2].id = status;
+		ecm_iad_descriptor.iFunction = status;
+	}
+
+	/* allocate a string ID for this instance's MAC address */
+
+	if (ecm_string_defs[3+num].id == 0) {
+		/* MAC address */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_string_defs[3+num].id = status;
+		//ecm_desc.iMACAddress = status;
+	}
+	
+	/* allocate and initialize one new instance */
+	ecm = kzalloc(sizeof *ecm, GFP_KERNEL);
+	if (!ecm)
+		return -ENOMEM;
+
+	/* export host's Ethernet address in CDC format */
+	snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
+		"%02X%02X%02X%02X%02X%02X",
+		ethaddr[0], ethaddr[1], ethaddr[2],
+		ethaddr[3], ethaddr[4], ethaddr[5]);
+	ecm_string_defs[3+num].s = ecm->ethaddr;
+
+	ecm->port.cdc_filter = DEFAULT_FILTER;
+
+	ecm->port.func.name = "cdc_ethernet";
+	ecm->port.func.strings = ecm_strings;
+	/* descriptors are per-instance copies */
+	ecm->port.func.bind = ecm_bind;
+	ecm->port.func.unbind = ecm_unbind;
+	ecm->port.func.set_alt = ecm_set_alt;
+	ecm->port.func.get_alt = ecm_get_alt;
+	ecm->port.func.setup = ecm_setup;
+	ecm->port.func.disable = ecm_disable;
+
+#if 0 /* #ifdef CONFIG_PM */
+	ecm->port.func.suspend = ecm_suspend;
+	ecm->port.func.resume = ecm_resume;
+#else
+	/* for macOS, the connect indication must be re-sent after resume */
+	ecm->port.func.resume = ecm_resume_notify;
+#endif
+	ecm->num = num;
+	if (force_net == 0) {
+		if (ecm->work_init_state == 0) {
+			INIT_DELAYED_WORK(&ecm->work, ecm_setup_work);
+			ecm->work_init_state = 1;
+		}
+		if (ecm_setup_work_time == 0) {
+			schedule_delayed_work(&ecm->work, msecs_to_jiffies(6000));
+			ecm->work_schedule_state = 1;
+			ecm_setup_work_time = 1;
+			printk("ecm_bind_config_num init and schedule ecm setup work, run_cnt:%d\n",
+					ecm_work_run_cnt);
+		} else {
+			printk("ecm setup work set again, run_cnt:%d\n", ecm_work_run_cnt);
+			if (ecm_work_run_cnt < 5) {
+				printk("schedule ecm setup work again, run_cnt:%d\n",
+						ecm_work_run_cnt);
+				schedule_delayed_work(&ecm->work, msecs_to_jiffies(6000));
+				ecm->work_schedule_state = 1;
+			} else {
+				ecm_setup_work_time = 0;
+			}
+		}
+	}
+
+	status = usb_add_function(c, &ecm->port.func);
+	if (status) {
+		ecm_string_defs[3+num].s = NULL;
+		if (ecm->work_schedule_state == 1) {
+			printk("ecm_bind_config_num: cancel delayed work\n");
+			cancel_delayed_work_sync(&ecm->work);
+			ecm->work_schedule_state = 0;
+			ecm_setup_work_time = 0;
+			ecm_work_run_cnt = 0;
+		}
+		kfree(ecm);
+	}
+	return status;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_eem.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_eem.c
new file mode 100644
index 0000000..a9cf205
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_eem.c
@@ -0,0 +1,597 @@
+/*
+ * f_eem.c -- USB CDC Ethernet (EEM) link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 EF Johnson Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+
+#include "u_ether.h"
+
+#define EEM_HLEN 2
+
+/*
+ * This function is a "CDC Ethernet Emulation Model" (CDC EEM)
+ * Ethernet link.
+ */
+
+struct f_eem {
+	struct gether			port;
+	u8				ctrl_id;
+};
+
+static inline struct f_eem *func_to_eem(struct usb_function *f)
+{
+	return container_of(f, struct f_eem, port.func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor eem_intf __initdata = {
+	.bLength =		sizeof eem_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_EEM,
+	.bInterfaceProtocol =	USB_CDC_PROTO_EEM,
+	/* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor eem_fs_in_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor eem_fs_out_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *eem_fs_function[] __initdata = {
+	/* CDC EEM control descriptors */
+	(struct usb_descriptor_header *) &eem_intf,
+	(struct usb_descriptor_header *) &eem_fs_in_desc,
+	(struct usb_descriptor_header *) &eem_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor eem_hs_in_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor eem_hs_out_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *eem_hs_function[] __initdata = {
+	/* CDC EEM control descriptors */
+	(struct usb_descriptor_header *) &eem_intf,
+	(struct usb_descriptor_header *) &eem_hs_in_desc,
+	(struct usb_descriptor_header *) &eem_hs_out_desc,
+	NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor eem_ss_in_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor eem_ss_out_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor eem_ss_bulk_comp_desc __initdata = {
+	.bLength =		sizeof eem_ss_bulk_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *eem_ss_function[] __initdata = {
+	/* CDC EEM control descriptors */
+	(struct usb_descriptor_header *) &eem_intf,
+	(struct usb_descriptor_header *) &eem_ss_in_desc,
+	(struct usb_descriptor_header *) &eem_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &eem_ss_out_desc,
+	(struct usb_descriptor_header *) &eem_ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string eem_string_defs[] = {
+	[0].s = "CDC Ethernet Emulation Model (EEM)",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings eem_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		eem_string_defs,
+};
+
+static struct usb_gadget_strings *eem_strings[] = {
+	&eem_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int eem_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+		ctrl->bRequestType, ctrl->bRequest,
+		w_value, w_index, w_length);
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+
+static int eem_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_eem		*eem = func_to_eem(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct net_device	*net;
+
+	/* we know alt == 0, so this is an activation or a reset */
+	if (alt != 0)
+		goto fail;
+
+	if (intf == eem->ctrl_id) {
+
+		if (eem->port.in_ep->driver_data) {
+			DBG(cdev, "reset eem\n");
+			gether_disconnect(&eem->port);
+		}
+
+		if (!eem->port.in_ep->desc || !eem->port.out_ep->desc) {
+			DBG(cdev, "init eem\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					       eem->port.in_ep) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       eem->port.out_ep)) {
+				eem->port.in_ep->desc = NULL;
+				eem->port.out_ep->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* zlps should not occur because zero-length EEM packets
+		 * will be inserted in those cases where they would occur
+		 */
+		eem->port.is_zlp_ok = 1;
+		eem->port.cdc_filter = DEFAULT_FILTER;
+		DBG(cdev, "activate eem\n");
+		net = gether_connect(&eem->port);
+		if (IS_ERR(net))
+			return PTR_ERR(net);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static void eem_disable(struct usb_function *f)
+{
+	struct f_eem		*eem = func_to_eem(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	DBG(cdev, "eem deactivated\n");
+
+	if (eem->port.in_ep->driver_data)
+		gether_disconnect(&eem->port);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* EEM function driver setup/binding */
+
+static int __init
+eem_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_eem		*eem = func_to_eem(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	eem->ctrl_id = status;
+	eem_intf.bInterfaceNumber = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_in_desc);
+	if (!ep)
+		goto fail;
+	eem->port.in_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_out_desc);
+	if (!ep)
+		goto fail;
+	eem->port.out_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(eem_fs_function);
+	if (!f->descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		eem_hs_in_desc.bEndpointAddress =
+				eem_fs_in_desc.bEndpointAddress;
+		eem_hs_out_desc.bEndpointAddress =
+				eem_fs_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(eem_hs_function);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		eem_ss_in_desc.bEndpointAddress =
+				eem_fs_in_desc.bEndpointAddress;
+		eem_ss_out_desc.bEndpointAddress =
+				eem_fs_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(eem_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	DBG(cdev, "CDC Ethernet (EEM): %s speed IN/%s OUT/%s\n",
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			eem->port.in_ep->name, eem->port.out_ep->name);
+	return 0;
+
+fail:
+	if (f->descriptors)
+		usb_free_descriptors(f->descriptors);
+	if (f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+
+	if (eem->port.out_ep)
+		eem->port.out_ep->driver_data = NULL;
+	if (eem->port.in_ep)
+		eem->port.in_ep->driver_data = NULL;
+
+	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+static void
+eem_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_eem	*eem = func_to_eem(f);
+
+	DBG(c->cdev, "eem unbind\n");
+
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+	kfree(eem);
+}
+
+static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct sk_buff *skb = (struct sk_buff *)req->context;
+
+	dev_kfree_skb_any(skb);
+}
+
+/*
+ * Add the EEM header and ethernet checksum.
+ * We currently do not attempt to put multiple ethernet frames
+ * into a single USB transfer
+ */
+static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb)
+{
+	struct sk_buff	*skb2 = NULL;
+	struct usb_ep	*in = port->in_ep;
+	int		padlen = 0;
+	u16		len = skb->len;
+
+	if (!skb_cloned(skb)) {
+		int headroom = skb_headroom(skb);
+		int tailroom = skb_tailroom(skb);
+
+		/* When ((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0,
+		 * stick two bytes of zero-length EEM packet on the end.
+		 */
+		if (((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) == 0)
+			padlen += 2;
+
+		if ((tailroom >= (ETH_FCS_LEN + padlen)) &&
+				(headroom >= EEM_HLEN))
+			goto done;
+	}
+
+	skb2 = skb_copy_expand(skb, EEM_HLEN, ETH_FCS_LEN + padlen, GFP_ATOMIC);
+	dev_kfree_skb_any(skb);
+	skb = skb2;
+	if (!skb)
+		return skb;
+
+done:
+	/* use the "no CRC" option */
+	put_unaligned_be32(0xdeadbeef, skb_put(skb, 4));
+
+	/* EEM packet header format:
+	 * b0..13:	length of ethernet frame
+	 * b14:		bmCRC (0 == sentinel CRC)
+	 * b15:		bmType (0 == data)
+	 */
+	len = skb->len;
+	put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2));
+
+	/* add a zero-length EEM packet, if needed */
+	if (padlen)
+		put_unaligned_le16(0, skb_put(skb, 2));
+
+	return skb;
+}
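+
+/*
+ * A worked example of the framing above (numbers are illustrative
+ * only): a 60 byte Ethernet frame first gets the 4 byte 0xdeadbeef
+ * sentinel appended, so skb->len becomes 64; the EEM data header is
+ * then 64 & 0x3FFF = 0x0040 with bmCRC = 0 and bmType = 0, stored
+ * little-endian as the bytes 0x40 0x00 in front of the frame.  If
+ * (64 + EEM_HLEN) happened to be a multiple of in->maxpacket, a two
+ * byte zero-length EEM packet would be appended as well.
+ */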
+
+/*
+ * Remove the EEM header.  Note that there can be many EEM packets in a single
+ * USB transfer, so we need to break them out and handle them independently.
+ */
+static int eem_unwrap(struct gether *port,
+			struct sk_buff *skb,
+			struct sk_buff_head *list)
+{
+	struct usb_composite_dev	*cdev = port->func.config->cdev;
+	int				status = 0;
+
+	do {
+		struct sk_buff	*skb2;
+		u16		header;
+		u16		len = 0;
+
+		if (skb->len < EEM_HLEN) {
+			status = -EINVAL;
+			DBG(cdev, "invalid EEM header\n");
+			goto error;
+		}
+
+		/* remove the EEM header */
+		header = get_unaligned_le16(skb->data);
+		skb_pull(skb, EEM_HLEN);
+
+		/* EEM packet header format:
+		 * b0..14:	EEM type dependent (data or command)
+		 * b15:		bmType (0 == data, 1 == command)
+		 */
+		if (header & BIT(15)) {
+			struct usb_request	*req = cdev->req;
+			u16			bmEEMCmd;
+
+			/* EEM command packet format:
+			 * b0..10:	bmEEMCmdParam
+			 * b11..13:	bmEEMCmd
+			 * b14:		reserved (must be zero)
+			 * b15:		bmType (1 == command)
+			 */
+			if (header & BIT(14))
+				continue;
+
+			bmEEMCmd = (header >> 11) & 0x7;
+			switch (bmEEMCmd) {
+			case 0: /* echo */
+				len = header & 0x7FF;
+				if (skb->len < len) {
+					status = -EOVERFLOW;
+					goto error;
+				}
+
+				skb2 = skb_clone(skb, GFP_ATOMIC);
+				if (unlikely(!skb2)) {
+					DBG(cdev, "EEM echo response error\n");
+					goto next;
+				}
+				skb_trim(skb2, len);
+				put_unaligned_le16(BIT(15) | BIT(11) | len,
+							skb_push(skb2, 2));
+				skb_copy_bits(skb2, 0, req->buf, skb2->len);
+				req->length = skb2->len;
+				req->complete = eem_cmd_complete;
+				req->zero = 1;
+				req->context = skb2;
+				if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
+					DBG(cdev, "echo response queue fail\n");
+				break;
+
+			case 1:  /* echo response */
+			case 2:  /* suspend hint */
+			case 3:  /* response hint */
+			case 4:  /* response complete hint */
+			case 5:  /* tickle */
+			default: /* reserved */
+				continue;
+			}
+		} else {
+			u32		crc, crc2;
+			struct sk_buff	*skb3;
+
+			/* check for zero-length EEM packet */
+			if (header == 0)
+				continue;
+
+			/* EEM data packet format:
+			 * b0..13:	length of ethernet frame
+			 * b14:		bmCRC (0 == sentinel, 1 == calculated)
+			 * b15:		bmType (0 == data)
+			 */
+			len = header & 0x3FFF;
+			if ((skb->len < len)
+					|| (len < (ETH_HLEN + ETH_FCS_LEN))) {
+				status = -EINVAL;
+				goto error;
+			}
+
+			/* validate CRC */
+			if (header & BIT(14)) {
+				crc = get_unaligned_le32(skb->data + len
+							- ETH_FCS_LEN);
+				crc2 = ~crc32_le(~0,
+						skb->data, len - ETH_FCS_LEN);
+			} else {
+				crc = get_unaligned_be32(skb->data + len
+							- ETH_FCS_LEN);
+				crc2 = 0xdeadbeef;
+			}
+			if (crc != crc2) {
+				DBG(cdev, "invalid EEM CRC\n");
+				goto next;
+			}
+
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+			if (unlikely(!skb2)) {
+				DBG(cdev, "unable to unframe EEM packet\n");
+				continue;
+			}
+			skb_trim(skb2, len - ETH_FCS_LEN);
+
+			skb3 = skb_copy_expand(skb2,
+						NET_IP_ALIGN,
+						0,
+						GFP_ATOMIC);
+			if (unlikely(!skb3)) {
+				DBG(cdev, "unable to realign EEM packet\n");
+				dev_kfree_skb_any(skb2);
+				continue;
+			}
+			dev_kfree_skb_any(skb2);
+			skb_queue_tail(list, skb3);
+		}
+next:
+		skb_pull(skb, len);
+	} while (skb->len);
+
+error:
+	dev_kfree_skb_any(skb);
+	return status;
+}
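+
+/*
+ * A worked example of the command path above (illustrative only): for
+ * an Echo command the host sends a header with bmType = 1 (bit 15 set),
+ * bmEEMCmd = 0 and bmEEMCmdParam holding the echo payload length.  For
+ * a 4 byte payload, the Echo Response queued above is prefixed with
+ * BIT(15) | BIT(11) | 4 = 0x8804.  Headers with the reserved bit 14
+ * set, and all commands other than Echo, are skipped without a
+ * response.
+ */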
+
+/**
+ * eem_bind_config - add CDC Ethernet (EEM) network link to a configuration
+ * @c: the configuration to support the network link
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_setup().  Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int __init eem_bind_config(struct usb_configuration *c)
+{
+	struct f_eem	*eem;
+	int		status;
+
+	/* maybe allocate device-global string IDs */
+	if (eem_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		eem_string_defs[0].id = status;
+		eem_intf.iInterface = status;
+	}
+
+	/* allocate and initialize one new instance */
+	eem = kzalloc(sizeof *eem, GFP_KERNEL);
+	if (!eem)
+		return -ENOMEM;
+
+	eem->port.cdc_filter = DEFAULT_FILTER;
+
+	eem->port.func.name = "cdc_eem";
+	eem->port.func.strings = eem_strings;
+	/* descriptors are per-instance copies */
+	eem->port.func.bind = eem_bind;
+	eem->port.func.unbind = eem_unbind;
+	eem->port.func.set_alt = eem_set_alt;
+	eem->port.func.setup = eem_setup;
+	eem->port.func.disable = eem_disable;
+	eem->port.wrap = eem_wrap;
+	eem->port.unwrap = eem_unwrap;
+	eem->port.header_len = EEM_HLEN;
+
+	status = usb_add_function(c, &eem->port.func);
+	if (status)
+		kfree(eem);
+	return status;
+}
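+
+/*
+ * A gadget driver typically wires this function into one of its
+ * configurations along the lines of the sketch below (the name
+ * my_config_bind() is only an example; gether_setup() and
+ * gether_cleanup() must bracket it as noted above):
+ *
+ *	static int __init my_config_bind(struct usb_configuration *c)
+ *	{
+ *		return eem_bind_config(c);
+ *	}
+ */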
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_fs.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_fs.c
new file mode 100644
index 0000000..e8d7923
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_fs.c
@@ -0,0 +1,2417 @@
+/*
+ * f_fs.c -- user mode file system API for USB composite function controllers
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * Author: Michal Nazarewicz <mina86@mina86.com>
+ *
+ * Based on inode.c (GadgetFS) which was:
+ * Copyright (C) 2003-2004 David Brownell
+ * Copyright (C) 2003 Agilent Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/blkdev.h>
+#include <linux/pagemap.h>
+#include <linux/export.h>
+#include <asm/unaligned.h>
+
+#include <linux/usb/composite.h>
+#include <linux/usb/functionfs.h>
+
+
+#define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by an honest dice roll ;) */
+
+
+/* Debugging ****************************************************************/
+
+#ifdef VERBOSE_DEBUG
+#  define pr_vdebug pr_debug
+#  define ffs_dump_mem(prefix, ptr, len) \
+	print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len)
+#else
+#  define pr_vdebug(...)                 do { } while (0)
+#  define ffs_dump_mem(prefix, ptr, len) do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define ENTER()    pr_vdebug("%s()\n", __func__)
+
+
+/* The data structure and setup file ****************************************/
+
+enum ffs_state {
+	/*
+	 * Waiting for descriptors and strings.
+	 *
+	 * In this state no open(2), read(2) or write(2) on epfiles
+	 * may succeed (which should not be a problem, as no such
+	 * files should be open in the first place).
+	 */
+	FFS_READ_DESCRIPTORS,
+	FFS_READ_STRINGS,
+
+	/*
+	 * We've got the descriptors and strings.  We are calling, or
+	 * have already called, functionfs_ready_callback().
+	 * functionfs_bind() may have been called, but we don't know.
+	 *
+	 * This is the only state in which operations on epfiles may
+	 * succeed.
+	 */
+	FFS_ACTIVE,
+
+	/*
+	 * All endpoints have been closed.  This state is also set if
+	 * we encounter an unrecoverable error.  The only unrecoverable
+	 * error is when, after reading strings from user space, we
+	 * fail to initialise the epfiles or functionfs_ready_callback()
+	 * returns an error (< 0).
+	 *
+	 * In this state no open(2), read(2) or write(2) (on ep0 or on
+	 * an epfile) may succeed.  At this point the epfiles are
+	 * unlinked and all closed, so this is not a problem; ep0 is
+	 * also closed, but the ep0 file still exists, so open(2) on
+	 * ep0 must fail.
+	 */
+	FFS_CLOSING
+};
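+
+/*
+ * Seen from user space, the states above translate roughly into the
+ * following sequence (a sketch; the mount point path is only an
+ * example, the "ep0" file is created in ffs_sb_fill() below):
+ *
+ *	fd = open(".../ep0", O_RDWR);		state: FFS_READ_DESCRIPTORS
+ *	write(fd, descs_blob, descs_len);	-> FFS_READ_STRINGS
+ *	write(fd, strings_blob, strings_len);	-> FFS_ACTIVE
+ *	read(fd, &event, sizeof(event));	the "epN" files may now be
+ *						opened and used
+ */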
+
+
+enum ffs_setup_state {
+	/* There is no setup request pending. */
+	FFS_NO_SETUP,
+	/*
+	 * User space has read the events and one of them was a setup
+	 * request.  The next read/write on ep0 will handle the
+	 * request.
+	 */
+	FFS_SETUP_PENDING,
+	/*
+	 * There was an event pending, but before user space handled
+	 * it another event arrived which cancelled the existing setup.
+	 * If this state is set, read/write on ep0 returns -EIDRM.
+	 * This state is only set when adding an event.
+	 */
+	FFS_SETUP_CANCELED
+};
+
+
+
+struct ffs_epfile;
+struct ffs_function;
+
+struct ffs_data {
+	struct usb_gadget		*gadget;
+
+	/*
+	 * Protects read/write operations; only one read/write may run
+	 * at a time.  As a consequence it also protects ep0req and
+	 * company.  This is held while a setup request is being
+	 * processed (queued).
+	 */
+	struct mutex			mutex;
+
+	/*
+	 * Protect access to endpoint related structures (basically
+	 * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for
+	 * endpoint zero.
+	 */
+	spinlock_t			eps_lock;
+
+	/*
+	 * XXX REVISIT do we need our own request?  Since we are not
+	 * handling setup requests immediately, user space may be so
+	 * slow that another setup arrives at the gadget, but this time
+	 * for another function rather than us, and then there could be
+	 * a race.  Is that the case?  Or maybe we can use cdev->req
+	 * after all; maybe we just need some spinlock for that?
+	 */
+	struct usb_request		*ep0req;		/* P: mutex */
+	struct completion		ep0req_completion;	/* P: mutex */
+	int				ep0req_status;		/* P: mutex */
+
+	/* reference counter */
+	atomic_t			ref;
+	/* how many files are opened (EP0 and others) */
+	atomic_t			opened;
+
+	/* EP0 state */
+	enum ffs_state			state;
+
+	/*
+	 * Possible transitions:
+	 * + FFS_NO_SETUP       -> FFS_SETUP_PENDING  -- P: ev.waitq.lock
+	 *               happens only in ep0 read which is P: mutex
+	 * + FFS_SETUP_PENDING  -> FFS_NO_SETUP       -- P: ev.waitq.lock
+	 *               happens only in ep0 i/o  which is P: mutex
+	 * + FFS_SETUP_PENDING  -> FFS_SETUP_CANCELED -- P: ev.waitq.lock
+	 * + FFS_SETUP_CANCELED -> FFS_NO_SETUP       -- cmpxchg
+	 */
+	enum ffs_setup_state		setup_state;
+
+#define FFS_SETUP_STATE(ffs)					\
+	((enum ffs_setup_state)cmpxchg(&(ffs)->setup_state,	\
+				       FFS_SETUP_CANCELED, FFS_NO_SETUP))
+
+	/* Events & such. */
+	struct {
+		u8				types[4];
+		unsigned short			count;
+		/* XXX REVISIT need to update it in some places, or do we? */
+		unsigned short			can_stall;
+		struct usb_ctrlrequest		setup;
+
+		wait_queue_head_t		waitq;
+	} ev; /* the whole structure, P: ev.waitq.lock */
+
+	/* Flags */
+	unsigned long			flags;
+#define FFS_FL_CALL_CLOSED_CALLBACK 0
+#define FFS_FL_BOUND                1
+
+	/* Active function */
+	struct ffs_function		*func;
+
+	/*
+	 * Device name, written once when the file system is mounted.
+	 * Intended for the user to read if she wants.
+	 */
+	const char			*dev_name;
+	/* Private data for our user (ie. gadget).  Managed by user. */
+	void				*private_data;
+
+	/* filled by __ffs_data_got_descs() */
+	/*
+	 * Real descriptors are 16 bytes after raw_descs (so you need
+	 * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the
+	 * first full speed descriptor).  raw_descs_length and
+	 * raw_fs_descs_length do not have those 16 bytes added.
+	 */
+	const void			*raw_descs;
+	unsigned			raw_descs_length;
+	unsigned			raw_fs_descs_length;
+	unsigned			fs_descs_count;
+	unsigned			hs_descs_count;
+
+	unsigned short			strings_count;
+	unsigned short			interfaces_count;
+	unsigned short			eps_count;
+	unsigned short			_pad1;
+
+	/* filled by __ffs_data_got_strings() */
+	/* ids in stringtabs are set in functionfs_bind() */
+	const void			*raw_strings;
+	struct usb_gadget_strings	**stringtabs;
+
+	/*
+	 * File system's super block, written once when the file
+	 * system is mounted.
+	 */
+	struct super_block		*sb;
+
+	/* File permissions, written once when fs is mounted */
+	struct ffs_file_perms {
+		umode_t				mode;
+		uid_t				uid;
+		gid_t				gid;
+	}				file_perms;
+
+	/*
+	 * The endpoint files, filled by ffs_epfiles_create(),
+	 * destroyed by ffs_epfiles_destroy().
+	 */
+	struct ffs_epfile		*epfiles;
+};
+
+/* Reference counter handling */
+static void ffs_data_get(struct ffs_data *ffs);
+static void ffs_data_put(struct ffs_data *ffs);
+/* Creates new ffs_data object. */
+static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
+
+/* Opened counter handling. */
+static void ffs_data_opened(struct ffs_data *ffs);
+static void ffs_data_closed(struct ffs_data *ffs);
+
+/* Called with ffs->mutex held; take over ownership of data. */
+static int __must_check
+__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
+static int __must_check
+__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
+
+
+/* The function structure ***************************************************/
+
+struct ffs_ep;
+
+struct ffs_function {
+	struct usb_configuration	*conf;
+	struct usb_gadget		*gadget;
+	struct ffs_data			*ffs;
+
+	struct ffs_ep			*eps;
+	u8				eps_revmap[16];
+	short				*interfaces_nums;
+
+	struct usb_function		function;
+};
+
+
+static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
+{
+	return container_of(f, struct ffs_function, function);
+}
+
+static void ffs_func_free(struct ffs_function *func);
+
+static void ffs_func_eps_disable(struct ffs_function *func);
+static int __must_check ffs_func_eps_enable(struct ffs_function *func);
+
+static int ffs_func_bind(struct usb_configuration *,
+			 struct usb_function *);
+static void ffs_func_unbind(struct usb_configuration *,
+			    struct usb_function *);
+static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
+static void ffs_func_disable(struct usb_function *);
+static int ffs_func_setup(struct usb_function *,
+			  const struct usb_ctrlrequest *);
+static void ffs_func_suspend(struct usb_function *);
+static void ffs_func_resume(struct usb_function *);
+
+
+static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
+static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
+
+
+/* The endpoints structures *************************************************/
+
+struct ffs_ep {
+	struct usb_ep			*ep;	/* P: ffs->eps_lock */
+	struct usb_request		*req;	/* P: epfile->mutex */
+
+	/* [0]: full speed, [1]: high speed */
+	struct usb_endpoint_descriptor	*descs[2];
+
+	u8				num;
+
+	int				status;	/* P: epfile->mutex */
+};
+
+struct ffs_epfile {
+	/* Protects ep->ep and ep->req. */
+	struct mutex			mutex;
+	wait_queue_head_t		wait;
+
+	struct ffs_data			*ffs;
+	struct ffs_ep			*ep;	/* P: ffs->eps_lock */
+
+	struct dentry			*dentry;
+
+	char				name[5];
+
+	unsigned char			in;	/* P: ffs->eps_lock */
+	unsigned char			isoc;	/* P: ffs->eps_lock */
+
+	unsigned char			_pad;
+};
+
+static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
+static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
+
+static struct inode *__must_check
+ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
+		   const struct file_operations *fops,
+		   struct dentry **dentry_p);
+
+
+/* Misc helper functions ****************************************************/
+
+static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
+	__attribute__((warn_unused_result, nonnull));
+static char *ffs_prepare_buffer(const char * __user buf, size_t len)
+	__attribute__((warn_unused_result, nonnull));
+
+
+/* Control file aka ep0 *****************************************************/
+
+static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ffs_data *ffs = req->context;
+
+	complete_all(&ffs->ep0req_completion);
+}
+
+static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
+{
+	struct usb_request *req = ffs->ep0req;
+	int ret;
+
+	req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);
+
+	spin_unlock_irq(&ffs->ev.waitq.lock);
+
+	req->buf      = data;
+	req->length   = len;
+
+	/*
+	 * The UDC layer requires us to provide a buffer even for a ZLP,
+	 * but it should not use it at all.  Let's provide a poisoned
+	 * pointer to catch a possible bug in the driver.
+	 */
+	if (req->buf == NULL)
+		req->buf = (void *)0xDEADBABE;
+
+	INIT_COMPLETION(ffs->ep0req_completion);
+
+	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
+	if (unlikely(ret < 0))
+		return ret;
+
+	ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
+	if (unlikely(ret)) {
+		usb_ep_dequeue(ffs->gadget->ep0, req);
+		return -EINTR;
+	}
+
+	ffs->setup_state = FFS_NO_SETUP;
+	return ffs->ep0req_status;
+}
+
+static int __ffs_ep0_stall(struct ffs_data *ffs)
+{
+	if (ffs->ev.can_stall) {
+		pr_vdebug("ep0 stall\n");
+		usb_ep_set_halt(ffs->gadget->ep0);
+		ffs->setup_state = FFS_NO_SETUP;
+		return -EL2HLT;
+	} else {
+		pr_debug("bogus ep0 stall!\n");
+		return -ESRCH;
+	}
+}
+
+static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
+			     size_t len, loff_t *ptr)
+{
+	struct ffs_data *ffs = file->private_data;
+	ssize_t ret;
+	char *data;
+
+	ENTER();
+
+	/* Fast check if setup was canceled */
+	if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
+		return -EIDRM;
+
+	/* Acquire mutex */
+	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
+	if (unlikely(ret < 0))
+		return ret;
+
+	/* Check state */
+	switch (ffs->state) {
+	case FFS_READ_DESCRIPTORS:
+	case FFS_READ_STRINGS:
+		/* Copy data */
+		if (unlikely(len < 16)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		data = ffs_prepare_buffer(buf, len);
+		if (IS_ERR(data)) {
+			ret = PTR_ERR(data);
+			break;
+		}
+
+		/* Handle data */
+		if (ffs->state == FFS_READ_DESCRIPTORS) {
+			pr_info("read descriptors\n");
+			ret = __ffs_data_got_descs(ffs, data, len);
+			if (unlikely(ret < 0))
+				break;
+
+			ffs->state = FFS_READ_STRINGS;
+			ret = len;
+		} else {
+			pr_info("read strings\n");
+			ret = __ffs_data_got_strings(ffs, data, len);
+			if (unlikely(ret < 0))
+				break;
+
+			ret = ffs_epfiles_create(ffs);
+			if (unlikely(ret)) {
+				ffs->state = FFS_CLOSING;
+				break;
+			}
+
+			ffs->state = FFS_ACTIVE;
+			mutex_unlock(&ffs->mutex);
+
+			ret = functionfs_ready_callback(ffs);
+			if (unlikely(ret < 0)) {
+				ffs->state = FFS_CLOSING;
+				return ret;
+			}
+
+			set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
+			return len;
+		}
+		break;
+
+	case FFS_ACTIVE:
+		data = NULL;
+		/*
+		 * We're called from user space, so we can use _irq
+		 * rather than _irqsave
+		 */
+		spin_lock_irq(&ffs->ev.waitq.lock);
+		switch (FFS_SETUP_STATE(ffs)) {
+		case FFS_SETUP_CANCELED:
+			ret = -EIDRM;
+			goto done_spin;
+
+		case FFS_NO_SETUP:
+			ret = -ESRCH;
+			goto done_spin;
+
+		case FFS_SETUP_PENDING:
+			break;
+		}
+
+		/* FFS_SETUP_PENDING */
+		if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
+			spin_unlock_irq(&ffs->ev.waitq.lock);
+			ret = __ffs_ep0_stall(ffs);
+			break;
+		}
+
+		/* FFS_SETUP_PENDING and not stall */
+		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
+
+		spin_unlock_irq(&ffs->ev.waitq.lock);
+
+		data = ffs_prepare_buffer(buf, len);
+		if (IS_ERR(data)) {
+			ret = PTR_ERR(data);
+			break;
+		}
+
+		spin_lock_irq(&ffs->ev.waitq.lock);
+
+		/*
+		 * We are guaranteed to be still in FFS_ACTIVE state
+		 * but the state of setup could have changed from
+		 * FFS_SETUP_PENDING to FFS_SETUP_CANCELED so we need
+		 * to check for that.  If that happened we copied data
+		 * from user space in vain but it's unlikely.
+		 *
+		 * For sure we are not in FFS_NO_SETUP since this is
+		 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
+		 * transition can be performed and it's protected by
+		 * mutex.
+		 */
+		if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
+			ret = -EIDRM;
+done_spin:
+			spin_unlock_irq(&ffs->ev.waitq.lock);
+		} else {
+			/* unlocks spinlock */
+			ret = __ffs_ep0_queue_wait(ffs, data, len);
+		}
+		kfree(data);
+		break;
+
+	default:
+		ret = -EBADFD;
+		break;
+	}
+
+	mutex_unlock(&ffs->mutex);
+	return ret;
+}
+
+static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
+				     size_t n)
+{
+	/*
+	 * We are holding ffs->ev.waitq.lock and ffs->mutex and we need
+	 * to release them.
+	 */
+	struct usb_functionfs_event events[n];
+	unsigned i = 0;
+
+	memset(events, 0, sizeof events);
+
+	do {
+		events[i].type = ffs->ev.types[i];
+		if (events[i].type == FUNCTIONFS_SETUP) {
+			events[i].u.setup = ffs->ev.setup;
+			ffs->setup_state = FFS_SETUP_PENDING;
+		}
+	} while (++i < n);
+
+	if (n < ffs->ev.count) {
+		ffs->ev.count -= n;
+		memmove(ffs->ev.types, ffs->ev.types + n,
+			ffs->ev.count * sizeof *ffs->ev.types);
+	} else {
+		ffs->ev.count = 0;
+	}
+
+	spin_unlock_irq(&ffs->ev.waitq.lock);
+	mutex_unlock(&ffs->mutex);
+
+	return unlikely(__copy_to_user(buf, events, sizeof events))
+		? -EFAULT : sizeof events;
+}
+
+static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
+			    size_t len, loff_t *ptr)
+{
+	struct ffs_data *ffs = file->private_data;
+	char *data = NULL;
+	size_t n;
+	int ret;
+
+	ENTER();
+
+	/* Fast check if setup was canceled */
+	if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
+		return -EIDRM;
+
+	/* Acquire mutex */
+	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
+	if (unlikely(ret < 0))
+		return ret;
+
+	/* Check state */
+	if (ffs->state != FFS_ACTIVE) {
+		ret = -EBADFD;
+		goto done_mutex;
+	}
+
+	/*
+	 * We're called from user space, so we can use _irq rather than
+	 * _irqsave
+	 */
+	spin_lock_irq(&ffs->ev.waitq.lock);
+
+	switch (FFS_SETUP_STATE(ffs)) {
+	case FFS_SETUP_CANCELED:
+		ret = -EIDRM;
+		break;
+
+	case FFS_NO_SETUP:
+		n = len / sizeof(struct usb_functionfs_event);
+		if (unlikely(!n)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
+							ffs->ev.count)) {
+			ret = -EINTR;
+			break;
+		}
+
+		return __ffs_ep0_read_events(ffs, buf,
+					     min(n, (size_t)ffs->ev.count));
+
+	case FFS_SETUP_PENDING:
+		if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
+			spin_unlock_irq(&ffs->ev.waitq.lock);
+			ret = __ffs_ep0_stall(ffs);
+			goto done_mutex;
+		}
+
+		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
+
+		spin_unlock_irq(&ffs->ev.waitq.lock);
+
+		if (likely(len)) {
+			data = kmalloc(len, GFP_KERNEL);
+			if (unlikely(!data)) {
+				ret = -ENOMEM;
+				goto done_mutex;
+			}
+		}
+
+		spin_lock_irq(&ffs->ev.waitq.lock);
+
+		/* See ffs_ep0_write() */
+		if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
+			ret = -EIDRM;
+			break;
+		}
+
+		/* unlocks spinlock */
+		ret = __ffs_ep0_queue_wait(ffs, data, len);
+		if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
+			ret = -EFAULT;
+		goto done_mutex;
+
+	default:
+		ret = -EBADFD;
+		break;
+	}
+
+	spin_unlock_irq(&ffs->ev.waitq.lock);
+done_mutex:
+	mutex_unlock(&ffs->mutex);
+	kfree(data);
+	return ret;
+}
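+
+/*
+ * In the FFS_NO_SETUP case above, read(2) returns whole struct
+ * usb_functionfs_event records, so a user-space event loop looks
+ * roughly like this (a sketch):
+ *
+ *	struct usb_functionfs_event event;
+ *
+ *	if (read(ep0_fd, &event, sizeof(event)) == sizeof(event)) {
+ *		switch (event.type) {
+ *		case FUNCTIONFS_ENABLE:
+ *			// start I/O on the endpoint files
+ *			break;
+ *		case FUNCTIONFS_SETUP:
+ *			// answer event.u.setup with a read or write on ep0
+ *			break;
+ *		default:
+ *			break;
+ *		}
+ *	}
+ */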
+
+static int ffs_ep0_open(struct inode *inode, struct file *file)
+{
+	struct ffs_data *ffs = inode->i_private;
+
+	ENTER();
+
+	if (unlikely(ffs->state == FFS_CLOSING))
+		return -EBUSY;
+
+	file->private_data = ffs;
+	ffs_data_opened(ffs);
+
+	return 0;
+}
+
+static int ffs_ep0_release(struct inode *inode, struct file *file)
+{
+	struct ffs_data *ffs = file->private_data;
+
+	ENTER();
+
+	ffs_data_closed(ffs);
+
+	return 0;
+}
+
+static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
+{
+	struct ffs_data *ffs = file->private_data;
+	struct usb_gadget *gadget = ffs->gadget;
+	long ret;
+
+	ENTER();
+
+	if (code == FUNCTIONFS_INTERFACE_REVMAP) {
+		struct ffs_function *func = ffs->func;
+		ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
+	} else if (gadget && gadget->ops->ioctl) {
+		ret = gadget->ops->ioctl(gadget, code, value);
+	} else {
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+static const struct file_operations ffs_ep0_operations = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+
+	.open =		ffs_ep0_open,
+	.write =	ffs_ep0_write,
+	.read =		ffs_ep0_read,
+	.release =	ffs_ep0_release,
+	.unlocked_ioctl =	ffs_ep0_ioctl,
+};
+
+
+/* "Normal" endpoints operations ********************************************/
+
+static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
+{
+	ENTER();
+	if (likely(req->context)) {
+		struct ffs_ep *ep = _ep->driver_data;
+		ep->status = req->status ? req->status : req->actual;
+		complete(req->context);
+	}
+}
+
+static ssize_t ffs_epfile_io(struct file *file,
+			     char __user *buf, size_t len, int read)
+{
+	struct ffs_epfile *epfile = file->private_data;
+	struct ffs_ep *ep;
+	char *data = NULL;
+	ssize_t ret;
+	int halt;
+
+	goto first_try;
+	do {
+		spin_unlock_irq(&epfile->ffs->eps_lock);
+		mutex_unlock(&epfile->mutex);
+
+first_try:
+		/* Are we still active? */
+		if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
+			ret = -ENODEV;
+			goto error;
+		}
+
+		/* Wait for endpoint to be enabled */
+		ep = epfile->ep;
+		if (!ep) {
+			if (file->f_flags & O_NONBLOCK) {
+				ret = -EAGAIN;
+				goto error;
+			}
+
+			if (wait_event_interruptible(epfile->wait,
+						     (ep = epfile->ep))) {
+				ret = -EINTR;
+				goto error;
+			}
+		}
+
+		/* Do we halt? */
+		halt = !read == !epfile->in;
+		if (halt && epfile->isoc) {
+			ret = -EINVAL;
+			goto error;
+		}
+
+		/* Allocate & copy */
+		if (!halt && !data) {
+			data = kzalloc(len, GFP_KERNEL);
+			if (unlikely(!data))
+				return -ENOMEM;
+
+			if (!read &&
+			    unlikely(__copy_from_user(data, buf, len))) {
+				ret = -EFAULT;
+				goto error;
+			}
+		}
+
+		/* We will be using request */
+		ret = ffs_mutex_lock(&epfile->mutex,
+				     file->f_flags & O_NONBLOCK);
+		if (unlikely(ret))
+			goto error;
+
+		/*
+		 * We're called from user space, so we can use _irq rather than
+		 * _irqsave
+		 */
+		spin_lock_irq(&epfile->ffs->eps_lock);
+
+		/*
+		 * Did the endpoint get disabled or changed while we
+		 * were acquiring the mutex?
+		 */
+	} while (unlikely(epfile->ep != ep));
+
+	/* Halt */
+	if (unlikely(halt)) {
+		if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
+			usb_ep_set_halt(ep->ep);
+		spin_unlock_irq(&epfile->ffs->eps_lock);
+		ret = -EBADMSG;
+	} else {
+		/* Fire the request */
+		DECLARE_COMPLETION_ONSTACK(done);
+
+		struct usb_request *req = ep->req;
+		req->context  = &done;
+		req->complete = ffs_epfile_io_complete;
+		req->buf      = data;
+		req->length   = len;
+
+		ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+
+		spin_unlock_irq(&epfile->ffs->eps_lock);
+
+		if (unlikely(ret < 0)) {
+			/* nop */
+		} else if (unlikely(wait_for_completion_interruptible(&done))) {
+			ret = -EINTR;
+			usb_ep_dequeue(ep->ep, req);
+		} else {
+			ret = ep->status;
+			if (read && ret > 0 &&
+			    unlikely(copy_to_user(buf, data, ret)))
+				ret = -EFAULT;
+		}
+	}
+
+	mutex_unlock(&epfile->mutex);
+error:
+	kfree(data);
+	return ret;
+}
+
+static ssize_t
+ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
+		 loff_t *ptr)
+{
+	ENTER();
+
+	return ffs_epfile_io(file, (char __user *)buf, len, 0);
+}
+
+static ssize_t
+ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
+{
+	ENTER();
+
+	return ffs_epfile_io(file, buf, len, 1);
+}
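+
+/*
+ * From user space the endpoint files behave like blocking pipes (a
+ * sketch; ep_in_fd and ep_out_fd stand for descriptors opened on the
+ * "epN" files):
+ *
+ *	n = write(ep_in_fd, buf, len);	queue an IN transfer and block
+ *					until it completes
+ *	n = read(ep_out_fd, buf, len);	block until the host sends data
+ *
+ * Writing to an OUT endpoint file (or reading from an IN one) halts
+ * the endpoint and fails with -EBADMSG, as ffs_epfile_io() implements
+ * above.
+ */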
+
+static int
+ffs_epfile_open(struct inode *inode, struct file *file)
+{
+	struct ffs_epfile *epfile = inode->i_private;
+
+	ENTER();
+
+	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
+		return -ENODEV;
+
+	file->private_data = epfile;
+	ffs_data_opened(epfile->ffs);
+
+	return 0;
+}
+
+static int
+ffs_epfile_release(struct inode *inode, struct file *file)
+{
+	struct ffs_epfile *epfile = inode->i_private;
+
+	ENTER();
+
+	ffs_data_closed(epfile->ffs);
+
+	return 0;
+}
+
+static long ffs_epfile_ioctl(struct file *file, unsigned code,
+			     unsigned long value)
+{
+	struct ffs_epfile *epfile = file->private_data;
+	int ret;
+
+	ENTER();
+
+	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
+		return -ENODEV;
+
+	spin_lock_irq(&epfile->ffs->eps_lock);
+	if (likely(epfile->ep)) {
+		switch (code) {
+		case FUNCTIONFS_FIFO_STATUS:
+			ret = usb_ep_fifo_status(epfile->ep->ep);
+			break;
+		case FUNCTIONFS_FIFO_FLUSH:
+			usb_ep_fifo_flush(epfile->ep->ep);
+			ret = 0;
+			break;
+		case FUNCTIONFS_CLEAR_HALT:
+			ret = usb_ep_clear_halt(epfile->ep->ep);
+			break;
+		case FUNCTIONFS_ENDPOINT_REVMAP:
+			ret = epfile->ep->num;
+			break;
+		default:
+			ret = -ENOTTY;
+		}
+	} else {
+		ret = -ENODEV;
+	}
+	spin_unlock_irq(&epfile->ffs->eps_lock);
+
+	return ret;
+}
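+
+/*
+ * Example ioctl usage from user space (a sketch):
+ *
+ *	ioctl(ep_fd, FUNCTIONFS_FIFO_FLUSH);	flush the endpoint FIFO
+ *	ioctl(ep_fd, FUNCTIONFS_CLEAR_HALT);	clear a halted endpoint
+ *	num = ioctl(ep_fd, FUNCTIONFS_ENDPOINT_REVMAP);
+ *						endpoint number this file
+ *						maps to
+ */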
+
+static const struct file_operations ffs_epfile_operations = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+
+	.open =		ffs_epfile_open,
+	.write =	ffs_epfile_write,
+	.read =		ffs_epfile_read,
+	.release =	ffs_epfile_release,
+	.unlocked_ioctl =	ffs_epfile_ioctl,
+};
+
+
+/* File system and super block operations ***********************************/
+
+/*
+ * Mounting the file system creates a controller file, used first for
+ * function configuration then later for event monitoring.
+ */
+
+static struct inode *__must_check
+ffs_sb_make_inode(struct super_block *sb, void *data,
+		  const struct file_operations *fops,
+		  const struct inode_operations *iops,
+		  struct ffs_file_perms *perms)
+{
+	struct inode *inode;
+
+	ENTER();
+
+	inode = new_inode(sb);
+
+	if (likely(inode)) {
+		struct timespec current_time = CURRENT_TIME;
+
+		inode->i_ino	 = get_next_ino();
+		inode->i_mode    = perms->mode;
+		inode->i_uid     = perms->uid;
+		inode->i_gid     = perms->gid;
+		inode->i_atime   = current_time;
+		inode->i_mtime   = current_time;
+		inode->i_ctime   = current_time;
+		inode->i_private = data;
+		if (fops)
+			inode->i_fop = fops;
+		if (iops)
+			inode->i_op  = iops;
+	}
+
+	return inode;
+}
+
+/* Create "regular" file */
+static struct inode *ffs_sb_create_file(struct super_block *sb,
+					const char *name, void *data,
+					const struct file_operations *fops,
+					struct dentry **dentry_p)
+{
+	struct ffs_data	*ffs = sb->s_fs_info;
+	struct dentry	*dentry;
+	struct inode	*inode;
+
+	ENTER();
+
+	dentry = d_alloc_name(sb->s_root, name);
+	if (unlikely(!dentry))
+		return NULL;
+
+	inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
+	if (unlikely(!inode)) {
+		dput(dentry);
+		return NULL;
+	}
+
+	d_add(dentry, inode);
+	if (dentry_p)
+		*dentry_p = dentry;
+
+	return inode;
+}
+
+/* Super block */
+static const struct super_operations ffs_sb_operations = {
+	.statfs =	simple_statfs,
+	.drop_inode =	generic_delete_inode,
+};
+
+struct ffs_sb_fill_data {
+	struct ffs_file_perms perms;
+	umode_t root_mode;
+	const char *dev_name;
+};
+
+static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
+{
+	struct ffs_sb_fill_data *data = _data;
+	struct inode	*inode;
+	struct ffs_data	*ffs;
+
+	ENTER();
+
+	/* Initialise data */
+	ffs = ffs_data_new();
+	if (unlikely(!ffs))
+		goto Enomem;
+
+	ffs->sb              = sb;
+	ffs->dev_name        = data->dev_name;
+	ffs->file_perms      = data->perms;
+
+	sb->s_fs_info        = ffs;
+	sb->s_blocksize      = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_magic          = FUNCTIONFS_MAGIC;
+	sb->s_op             = &ffs_sb_operations;
+	sb->s_time_gran      = 1;
+
+	/* Root inode */
+	data->perms.mode = data->root_mode;
+	inode = ffs_sb_make_inode(sb, NULL,
+				  &simple_dir_operations,
+				  &simple_dir_inode_operations,
+				  &data->perms);
+	sb->s_root = d_make_root(inode);
+	if (unlikely(!sb->s_root))
+		goto Enomem;
+
+	/* EP0 file */
+	if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
+					 &ffs_ep0_operations, NULL)))
+		goto Enomem;
+
+	return 0;
+
+Enomem:
+	return -ENOMEM;
+}
+
+static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
+{
+	ENTER();
+
+	if (!opts || !*opts)
+		return 0;
+
+	for (;;) {
+		char *end, *eq, *comma;
+		unsigned long value;
+
+		/* Option limit */
+		comma = strchr(opts, ',');
+		if (comma)
+			*comma = 0;
+
+		/* Value limit */
+		eq = strchr(opts, '=');
+		if (unlikely(!eq)) {
+			pr_err("'=' missing in %s\n", opts);
+			return -EINVAL;
+		}
+		*eq = 0;
+
+		/* Parse value */
+		value = simple_strtoul(eq + 1, &end, 0);
+		if (unlikely(*end != ',' && *end != 0)) {
+			pr_err("%s: invalid value: %s\n", opts, eq + 1);
+			return -EINVAL;
+		}
+
+		/* Interpret option */
+		switch (eq - opts) {
+		case 5:
+			if (!memcmp(opts, "rmode", 5))
+				data->root_mode  = (value & 0555) | S_IFDIR;
+			else if (!memcmp(opts, "fmode", 5))
+				data->perms.mode = (value & 0666) | S_IFREG;
+			else
+				goto invalid;
+			break;
+
+		case 4:
+			if (!memcmp(opts, "mode", 4)) {
+				data->root_mode  = (value & 0555) | S_IFDIR;
+				data->perms.mode = (value & 0666) | S_IFREG;
+			} else {
+				goto invalid;
+			}
+			break;
+
+		case 3:
+			if (!memcmp(opts, "uid", 3))
+				data->perms.uid = value;
+			else if (!memcmp(opts, "gid", 3))
+				data->perms.gid = value;
+			else
+				goto invalid;
+			break;
+
+		default:
+invalid:
+			pr_err("%s: invalid option\n", opts);
+			return -EINVAL;
+		}
+
+		/* Next iteration */
+		if (!comma)
+			break;
+		opts = comma + 1;
+	}
+
+	return 0;
+}
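+
+/*
+ * Example (values are illustrative): "uid=1000,gid=1000,rmode=0550,fmode=0660"
+ * makes the mount directory mode 0550 and every created file mode 0660,
+ * both owned by uid/gid 1000.  A bare "mode=0660" sets both at once;
+ * directory bits are masked with 0555 and file bits with 0666 either way.
+ */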
+
+/* "mount -t functionfs dev_name /dev/function" ends up here */
+
+static struct dentry *
+ffs_fs_mount(struct file_system_type *t, int flags,
+	      const char *dev_name, void *opts)
+{
+	struct ffs_sb_fill_data data = {
+		.perms = {
+			.mode = S_IFREG | 0600,
+			.uid = 0,
+			.gid = 0
+		},
+		.root_mode = S_IFDIR | 0500,
+	};
+	int ret;
+
+	ENTER();
+
+	ret = functionfs_check_dev_callback(dev_name);
+	if (unlikely(ret < 0))
+		return ERR_PTR(ret);
+
+	ret = ffs_fs_parse_opts(&data, opts);
+	if (unlikely(ret < 0))
+		return ERR_PTR(ret);
+
+	data.dev_name = dev_name;
+	return mount_single(t, flags, &data, ffs_sb_fill);
+}
+
+static void
+ffs_fs_kill_sb(struct super_block *sb)
+{
+	ENTER();
+
+	kill_litter_super(sb);
+	if (sb->s_fs_info)
+		ffs_data_put(sb->s_fs_info);
+}
+
+static struct file_system_type ffs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "functionfs",
+	.mount		= ffs_fs_mount,
+	.kill_sb	= ffs_fs_kill_sb,
+};
+
+
+/* Driver's main init/cleanup functions *************************************/
+
+static int functionfs_init(void)
+{
+	int ret;
+
+	ENTER();
+
+	ret = register_filesystem(&ffs_fs_type);
+	if (likely(!ret))
+		pr_info("file system registered\n");
+	else
+		pr_err("failed registering file system (%d)\n", ret);
+
+	return ret;
+}
+
+static void functionfs_cleanup(void)
+{
+	ENTER();
+
+	pr_info("unloading\n");
+	unregister_filesystem(&ffs_fs_type);
+}
+
+
+/* ffs_data and ffs_function construction and destruction code **************/
+
+static void ffs_data_clear(struct ffs_data *ffs);
+static void ffs_data_reset(struct ffs_data *ffs);
+
+static void ffs_data_get(struct ffs_data *ffs)
+{
+	ENTER();
+
+	atomic_inc(&ffs->ref);
+}
+
+static void ffs_data_opened(struct ffs_data *ffs)
+{
+	ENTER();
+
+	atomic_inc(&ffs->ref);
+	atomic_inc(&ffs->opened);
+}
+
+static void ffs_data_put(struct ffs_data *ffs)
+{
+	ENTER();
+
+	if (unlikely(atomic_dec_and_test(&ffs->ref))) {
+		pr_info("%s(): freeing\n", __func__);
+		ffs_data_clear(ffs);
+		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
+		       waitqueue_active(&ffs->ep0req_completion.wait));
+		kfree(ffs);
+	}
+}
+
+static void ffs_data_closed(struct ffs_data *ffs)
+{
+	ENTER();
+
+	if (atomic_dec_and_test(&ffs->opened)) {
+		ffs->state = FFS_CLOSING;
+		ffs_data_reset(ffs);
+	}
+
+	ffs_data_put(ffs);
+}
+
+static struct ffs_data *ffs_data_new(void)
+{
+	struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
+	if (unlikely(!ffs))
+		return NULL;
+
+	ENTER();
+
+	atomic_set(&ffs->ref, 1);
+	atomic_set(&ffs->opened, 0);
+	ffs->state = FFS_READ_DESCRIPTORS;
+	mutex_init(&ffs->mutex);
+	spin_lock_init(&ffs->eps_lock);
+	init_waitqueue_head(&ffs->ev.waitq);
+	init_completion(&ffs->ep0req_completion);
+
+	/* XXX REVISIT need to update it in some places, or do we? */
+	ffs->ev.can_stall = 1;
+
+	return ffs;
+}
+
+static void ffs_data_clear(struct ffs_data *ffs)
+{
+	ENTER();
+
+	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
+		functionfs_closed_callback(ffs);
+
+	BUG_ON(ffs->gadget);
+
+	if (ffs->epfiles)
+		ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+
+	kfree(ffs->raw_descs);
+	kfree(ffs->raw_strings);
+	kfree(ffs->stringtabs);
+}
+
+static void ffs_data_reset(struct ffs_data *ffs)
+{
+	ENTER();
+
+	ffs_data_clear(ffs);
+
+	ffs->epfiles = NULL;
+	ffs->raw_descs = NULL;
+	ffs->raw_strings = NULL;
+	ffs->stringtabs = NULL;
+
+	ffs->raw_descs_length = 0;
+	ffs->raw_fs_descs_length = 0;
+	ffs->fs_descs_count = 0;
+	ffs->hs_descs_count = 0;
+
+	ffs->strings_count = 0;
+	ffs->interfaces_count = 0;
+	ffs->eps_count = 0;
+
+	ffs->ev.count = 0;
+
+	ffs->state = FFS_READ_DESCRIPTORS;
+	ffs->setup_state = FFS_NO_SETUP;
+	ffs->flags = 0;
+}
+
+
+static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
+{
+	struct usb_gadget_strings **lang;
+	int first_id;
+
+	ENTER();
+
+	if (WARN_ON(ffs->state != FFS_ACTIVE
+		 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
+		return -EBADFD;
+
+	first_id = usb_string_ids_n(cdev, ffs->strings_count);
+	if (unlikely(first_id < 0))
+		return first_id;
+
+	ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
+	if (unlikely(!ffs->ep0req))
+		return -ENOMEM;
+	ffs->ep0req->complete = ffs_ep0_complete;
+	ffs->ep0req->context = ffs;
+
+	lang = ffs->stringtabs;
+	if (lang) {
+		for (; *lang; ++lang) {
+			struct usb_string *str = (*lang)->strings;
+			int id = first_id;
+			for (; str->s; ++id, ++str)
+				str->id = id;
+		}
+	}
+
+	ffs->gadget = cdev->gadget;
+	ffs_data_get(ffs);
+	return 0;
+}
+
+static void functionfs_unbind(struct ffs_data *ffs)
+{
+	ENTER();
+
+	if (!WARN_ON(!ffs->gadget)) {
+		usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
+		ffs->ep0req = NULL;
+		ffs->gadget = NULL;
+		ffs_data_put(ffs);
+		clear_bit(FFS_FL_BOUND, &ffs->flags);
+	}
+}
+
+static int ffs_epfiles_create(struct ffs_data *ffs)
+{
+	struct ffs_epfile *epfile, *epfiles;
+	unsigned i, count;
+
+	ENTER();
+
+	count = ffs->eps_count;
+	epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
+	if (!epfiles)
+		return -ENOMEM;
+
+	epfile = epfiles;
+	for (i = 1; i <= count; ++i, ++epfile) {
+		epfile->ffs = ffs;
+		mutex_init(&epfile->mutex);
+		init_waitqueue_head(&epfile->wait);
+		sprintf(epfile->name, "ep%u", i);
+		if (unlikely(!ffs_sb_create_file(ffs->sb, epfile->name, epfile,
+						 &ffs_epfile_operations,
+						 &epfile->dentry))) {
+			ffs_epfiles_destroy(epfiles, i - 1);
+			return -ENOMEM;
+		}
+	}
+
+	ffs->epfiles = epfiles;
+	return 0;
+}
+
+static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
+{
+	struct ffs_epfile *epfile = epfiles;
+
+	ENTER();
+
+	for (; count; --count, ++epfile) {
+		BUG_ON(mutex_is_locked(&epfile->mutex) ||
+		       waitqueue_active(&epfile->wait));
+		if (epfile->dentry) {
+			d_delete(epfile->dentry);
+			dput(epfile->dentry);
+			epfile->dentry = NULL;
+		}
+	}
+
+	kfree(epfiles);
+}
+
+static int functionfs_bind_config(struct usb_composite_dev *cdev,
+				  struct usb_configuration *c,
+				  struct ffs_data *ffs)
+{
+	struct ffs_function *func;
+	int ret;
+
+	ENTER();
+
+	func = kzalloc(sizeof *func, GFP_KERNEL);
+	if (unlikely(!func))
+		return -ENOMEM;
+
+	func->function.name    = "Function FS Gadget";
+	func->function.strings = ffs->stringtabs;
+
+	func->function.bind    = ffs_func_bind;
+	func->function.unbind  = ffs_func_unbind;
+	func->function.set_alt = ffs_func_set_alt;
+	func->function.disable = ffs_func_disable;
+	func->function.setup   = ffs_func_setup;
+	func->function.suspend = ffs_func_suspend;
+	func->function.resume  = ffs_func_resume;
+
+	func->conf   = c;
+	func->gadget = cdev->gadget;
+	func->ffs = ffs;
+	ffs_data_get(ffs);
+
+	ret = usb_add_function(c, &func->function);
+	if (unlikely(ret))
+		ffs_func_free(func);
+
+	return ret;
+}
+
+static void ffs_func_free(struct ffs_function *func)
+{
+	ENTER();
+
+	ffs_data_put(func->ffs);
+
+	kfree(func->eps);
+	/*
+	 * eps and interfaces_nums are allocated in the same chunk so
+	 * only one free is required.  Descriptors are also allocated
+	 * in the same chunk.
+	 */
+
+	kfree(func);
+}
+
+static void ffs_func_eps_disable(struct ffs_function *func)
+{
+	struct ffs_ep *ep         = func->eps;
+	struct ffs_epfile *epfile = func->ffs->epfiles;
+	unsigned count            = func->ffs->eps_count;
+	unsigned long flags;
+
+	spin_lock_irqsave(&func->ffs->eps_lock, flags);
+	do {
+		/* pending requests get nuked */
+		if (likely(ep->ep))
+			usb_ep_disable(ep->ep);
+		epfile->ep = NULL;
+
+		++ep;
+		++epfile;
+	} while (--count);
+	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+}
+
+static int ffs_func_eps_enable(struct ffs_function *func)
+{
+	struct ffs_data *ffs      = func->ffs;
+	struct ffs_ep *ep         = func->eps;
+	struct ffs_epfile *epfile = ffs->epfiles;
+	unsigned count            = ffs->eps_count;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&func->ffs->eps_lock, flags);
+	do {
+		struct usb_endpoint_descriptor *ds;
+		int desc_idx = ffs->gadget->speed == USB_SPEED_HIGH ? 1 : 0;
+		ds = ep->descs[desc_idx];
+		if (!ds) {
+			ret = -EINVAL;
+			break;
+		}
+
+		ep->ep->driver_data = ep;
+		ep->ep->desc = ds;
+		ret = usb_ep_enable(ep->ep);
+		if (likely(!ret)) {
+			epfile->ep = ep;
+			epfile->in = usb_endpoint_dir_in(ds);
+			epfile->isoc = usb_endpoint_xfer_isoc(ds);
+		} else {
+			break;
+		}
+
+		wake_up(&epfile->wait);
+
+		++ep;
+		++epfile;
+	} while (--count);
+	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+
+	return ret;
+}
+
+
+/* Parsing and building descriptors and strings *****************************/
+
+/*
+ * This validates whether the data pointed to by data is a valid USB
+ * descriptor and records how many interfaces, endpoints and strings
+ * the given configuration requires.  Returns the length of the
+ * descriptor, or a negative errno if the data is invalid.
+ */
+
+enum ffs_entity_type {
+	FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
+};
+
+typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
+				   u8 *valuep,
+				   struct usb_descriptor_header *desc,
+				   void *priv);
+
+static int __must_check ffs_do_desc(char *data, unsigned len,
+				    ffs_entity_callback entity, void *priv)
+{
+	struct usb_descriptor_header *_ds = (void *)data;
+	u8 length;
+	int ret;
+
+	ENTER();
+
+	/* At least two bytes are required: length and type */
+	if (len < 2) {
+		pr_vdebug("descriptor too short\n");
+		return -EINVAL;
+	}
+
+	/* Do we have at least as many bytes as the descriptor takes? */
+	length = _ds->bLength;
+	if (len < length) {
+		pr_vdebug("descriptor longer then available data\n");
+		return -EINVAL;
+	}
+
+#define __entity_check_INTERFACE(val)  1
+#define __entity_check_STRING(val)     (val)
+#define __entity_check_ENDPOINT(val)   ((val) & USB_ENDPOINT_NUMBER_MASK)
+#define __entity(type, val) do {					\
+		pr_vdebug("entity " #type "(%02x)\n", (val));		\
+		if (unlikely(!__entity_check_ ##type(val))) {		\
+			pr_vdebug("invalid entity's value\n");		\
+			return -EINVAL;					\
+		}							\
+		ret = entity(FFS_ ##type, &val, _ds, priv);		\
+		if (unlikely(ret < 0)) {				\
+			pr_debug("entity " #type "(%02x); ret = %d\n",	\
+				 (val), ret);				\
+			return ret;					\
+		}							\
+	} while (0)
+
+	/* Parse descriptor depending on type. */
+	switch (_ds->bDescriptorType) {
+	case USB_DT_DEVICE:
+	case USB_DT_CONFIG:
+	case USB_DT_STRING:
+	case USB_DT_DEVICE_QUALIFIER:
+		/* function can't have any of those */
+		pr_vdebug("descriptor reserved for gadget: %d\n",
+		      _ds->bDescriptorType);
+		return -EINVAL;
+
+	case USB_DT_INTERFACE: {
+		struct usb_interface_descriptor *ds = (void *)_ds;
+		pr_vdebug("interface descriptor\n");
+		if (length != sizeof *ds)
+			goto inv_length;
+
+		__entity(INTERFACE, ds->bInterfaceNumber);
+		if (ds->iInterface)
+			__entity(STRING, ds->iInterface);
+	}
+		break;
+
+	case USB_DT_ENDPOINT: {
+		struct usb_endpoint_descriptor *ds = (void *)_ds;
+		pr_vdebug("endpoint descriptor\n");
+		if (length != USB_DT_ENDPOINT_SIZE &&
+		    length != USB_DT_ENDPOINT_AUDIO_SIZE)
+			goto inv_length;
+		__entity(ENDPOINT, ds->bEndpointAddress);
+	}
+		break;
+
+	case USB_DT_OTG:
+		if (length != sizeof(struct usb_otg_descriptor))
+			goto inv_length;
+		break;
+
+	case USB_DT_INTERFACE_ASSOCIATION: {
+		struct usb_interface_assoc_descriptor *ds = (void *)_ds;
+		pr_vdebug("interface association descriptor\n");
+		if (length != sizeof *ds)
+			goto inv_length;
+		if (ds->iFunction)
+			__entity(STRING, ds->iFunction);
+	}
+		break;
+
+	case USB_DT_OTHER_SPEED_CONFIG:
+	case USB_DT_INTERFACE_POWER:
+	case USB_DT_DEBUG:
+	case USB_DT_SECURITY:
+	case USB_DT_CS_RADIO_CONTROL:
+		/* TODO */
+		pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
+		return -EINVAL;
+
+	default:
+		/* We should never be here */
+		pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
+		return -EINVAL;
+
+inv_length:
+		pr_vdebug("invalid length: %d (descriptor %d)\n",
+			  _ds->bLength, _ds->bDescriptorType);
+		return -EINVAL;
+	}
+
+#undef __entity
+#undef __entity_check_DESCRIPTOR
+#undef __entity_check_INTERFACE
+#undef __entity_check_STRING
+#undef __entity_check_ENDPOINT
+
+	return length;
+}
+
+static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
+				     ffs_entity_callback entity, void *priv)
+{
+	const unsigned _len = len;
+	unsigned long num = 0;
+
+	ENTER();
+
+	for (;;) {
+		int ret;
+
+		if (num == count)
+			data = NULL;
+
+		/* Record "descriptor" entity */
+		ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
+		if (unlikely(ret < 0)) {
+			pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
+				 num, ret);
+			return ret;
+		}
+
+		if (!data)
+			return _len - len;
+
+		ret = ffs_do_desc(data, len, entity, priv);
+		if (unlikely(ret < 0)) {
+			pr_debug("%s returns %d\n", __func__, ret);
+			return ret;
+		}
+
+		len -= ret;
+		data += ret;
+		++num;
+	}
+}
+
+static int __ffs_data_do_entity(enum ffs_entity_type type,
+				u8 *valuep, struct usb_descriptor_header *desc,
+				void *priv)
+{
+	struct ffs_data *ffs = priv;
+
+	ENTER();
+
+	switch (type) {
+	case FFS_DESCRIPTOR:
+		break;
+
+	case FFS_INTERFACE:
+		/*
+		 * Interfaces are indexed from zero so if we
+		 * encountered interface "n" then there are at least
+		 * "n+1" interfaces.
+		 */
+		if (*valuep >= ffs->interfaces_count)
+			ffs->interfaces_count = *valuep + 1;
+		break;
+
+	case FFS_STRING:
+		/*
+		 * Strings are indexed from 1 (0 is magic ;) reserved
+		 * for languages list or some such)
+		 */
+		if (*valuep > ffs->strings_count)
+			ffs->strings_count = *valuep;
+		break;
+
+	case FFS_ENDPOINT:
+		/* Endpoints are indexed from 1 as well. */
+		if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count)
+			ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK);
+		break;
+	}
+
+	return 0;
+}
+
+static int __ffs_data_got_descs(struct ffs_data *ffs,
+				char *const _data, size_t len)
+{
+	unsigned fs_count, hs_count;
+	int fs_len, ret = -EINVAL;
+	char *data = _data;
+
+	ENTER();
+
+	if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_DESCRIPTORS_MAGIC ||
+		     get_unaligned_le32(data + 4) != len))
+		goto error;
+	fs_count = get_unaligned_le32(data +  8);
+	hs_count = get_unaligned_le32(data + 12);
+
+	if (!fs_count && !hs_count)
+		goto einval;
+
+	data += 16;
+	len  -= 16;
+
+	if (likely(fs_count)) {
+		fs_len = ffs_do_descs(fs_count, data, len,
+				      __ffs_data_do_entity, ffs);
+		if (unlikely(fs_len < 0)) {
+			ret = fs_len;
+			goto error;
+		}
+
+		data += fs_len;
+		len  -= fs_len;
+	} else {
+		fs_len = 0;
+	}
+
+	if (likely(hs_count)) {
+		ret = ffs_do_descs(hs_count, data, len,
+				   __ffs_data_do_entity, ffs);
+		if (unlikely(ret < 0))
+			goto error;
+	} else {
+		ret = 0;
+	}
+
+	if (unlikely(len != ret))
+		goto einval;
+
+	ffs->raw_fs_descs_length = fs_len;
+	ffs->raw_descs_length    = fs_len + ret;
+	ffs->raw_descs           = _data;
+	ffs->fs_descs_count      = fs_count;
+	ffs->hs_descs_count      = hs_count;
+
+	return 0;
+
+einval:
+	ret = -EINVAL;
+error:
+	kfree(_data);
+	return ret;
+}
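+
+/*
+ * The descriptors blob written to ep0 therefore has this layout (a
+ * sketch of what the checks above expect; the same 16 byte header is
+ * described by struct usb_functionfs_descs_head in
+ * <linux/usb/functionfs.h>):
+ *
+ *	le32 magic	FUNCTIONFS_DESCRIPTORS_MAGIC
+ *	le32 length	total length of the blob, header included
+ *	le32 fs_count	number of full speed descriptors
+ *	le32 hs_count	number of high speed descriptors
+ *	followed by the full speed descriptors and then the high speed
+ *	descriptors, packed back to back
+ */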
+
+static int __ffs_data_got_strings(struct ffs_data *ffs,
+				  char *const _data, size_t len)
+{
+	u32 str_count, needed_count, lang_count;
+	struct usb_gadget_strings **stringtabs, *t;
+	struct usb_string *strings, *s;
+	const char *data = _data;
+
+	ENTER();
+
+	if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+		     get_unaligned_le32(data + 4) != len))
+		goto error;
+	str_count  = get_unaligned_le32(data + 8);
+	lang_count = get_unaligned_le32(data + 12);
+
+	/* if one is zero the other must be zero */
+	if (unlikely(!str_count != !lang_count))
+		goto error;
+
+	/* Do we have at least as many strings as descriptors need? */
+	needed_count = ffs->strings_count;
+	if (unlikely(str_count < needed_count))
+		goto error;
+
+	/*
+	 * If we don't need any strings just return and free all
+	 * memory.
+	 */
+	if (!needed_count) {
+		kfree(_data);
+		return 0;
+	}
+
+	/* Allocate everything in one chunk so there's less maintenance. */
+	{
+		struct {
+			struct usb_gadget_strings *stringtabs[lang_count + 1];
+			struct usb_gadget_strings stringtab[lang_count];
+			struct usb_string strings[lang_count*(needed_count+1)];
+		} *d;
+		unsigned i = 0;
+
+		d = kmalloc(sizeof *d, GFP_KERNEL);
+		if (unlikely(!d)) {
+			kfree(_data);
+			return -ENOMEM;
+		}
+
+		stringtabs = d->stringtabs;
+		t = d->stringtab;
+		i = lang_count;
+		do {
+			*stringtabs++ = t++;
+		} while (--i);
+		*stringtabs = NULL;
+
+		stringtabs = d->stringtabs;
+		t = d->stringtab;
+		s = d->strings;
+		strings = s;
+	}
+
+	/* For each language */
+	data += 16;
+	len -= 16;
+
+	do { /* lang_count > 0 so we can use do-while */
+		unsigned needed = needed_count;
+
+		if (unlikely(len < 3))
+			goto error_free;
+		t->language = get_unaligned_le16(data);
+		t->strings  = s;
+		++t;
+
+		data += 2;
+		len -= 2;
+
+		/* For each string */
+		do { /* str_count > 0 so we can use do-while */
+			size_t length = strnlen(data, len);
+
+			if (unlikely(length == len))
+				goto error_free;
+
+			/*
+			 * The user may provide more strings than we
+			 * need; if that's the case we simply ignore
+			 * the rest.
+			 */
+			if (likely(needed)) {
+				/*
+				 * s->id will be set while adding
+				 * function to configuration so for
+				 * now just leave garbage here.
+				 */
+				s->s = data;
+				--needed;
+				++s;
+			}
+
+			data += length + 1;
+			len -= length + 1;
+		} while (--str_count);
+
+		s->id = 0;   /* terminator */
+		s->s = NULL;
+		++s;
+
+	} while (--lang_count);
+
+	/* Some garbage left? */
+	if (unlikely(len))
+		goto error_free;
+
+	/* Done! */
+	ffs->stringtabs = stringtabs;
+	ffs->raw_strings = _data;
+
+	return 0;
+
+error_free:
+	kfree(stringtabs);
+error:
+	kfree(_data);
+	return -EINVAL;
+}
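+
+/*
+ * The strings blob parsed above has the following layout (a sketch of
+ * what the checks expect):
+ *
+ *	le32 magic	FUNCTIONFS_STRINGS_MAGIC
+ *	le32 length	total length of the blob, header included
+ *	le32 str_count	number of strings per language
+ *	le32 lang_count	number of languages
+ *	then, for each language: a le16 language id (e.g. 0x0409 for
+ *	en-us) followed by str_count NUL-terminated strings
+ */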
+
+
+/* Events handling and management *******************************************/
+
+static void __ffs_event_add(struct ffs_data *ffs,
+			    enum usb_functionfs_event_type type)
+{
+	enum usb_functionfs_event_type rem_type1, rem_type2 = type;
+	int neg = 0;
+
+	/*
+	 * Abort any unhandled setup
+	 *
+	 * We do not need to worry about a cmpxchg() changing the value
+	 * of ffs->setup_state without holding the lock, because when
+	 * the state is FFS_SETUP_PENDING the cmpxchg() calls elsewhere
+	 * in this file do nothing.
+	 */
+	if (ffs->setup_state == FFS_SETUP_PENDING)
+		ffs->setup_state = FFS_SETUP_CANCELED;
+
+	switch (type) {
+	case FUNCTIONFS_RESUME:
+		rem_type2 = FUNCTIONFS_SUSPEND;
+		/* FALL THROUGH */
+	case FUNCTIONFS_SUSPEND:
+	case FUNCTIONFS_SETUP:
+		rem_type1 = type;
+		/* Discard all similar events */
+		break;
+
+	case FUNCTIONFS_BIND:
+	case FUNCTIONFS_UNBIND:
+	case FUNCTIONFS_DISABLE:
+	case FUNCTIONFS_ENABLE:
+		/* Discard everything other than power management. */
+		rem_type1 = FUNCTIONFS_SUSPEND;
+		rem_type2 = FUNCTIONFS_RESUME;
+		neg = 1;
+		break;
+
+	default:
+		BUG();
+	}
+
+	{
+		u8 *ev  = ffs->ev.types, *out = ev;
+		unsigned n = ffs->ev.count;
+		for (; n; --n, ++ev)
+			if ((*ev == rem_type1 || *ev == rem_type2) == neg)
+				*out++ = *ev;
+			else
+				pr_vdebug("purging event %d\n", *ev);
+		ffs->ev.count = out - ffs->ev.types;
+	}
+
+	pr_vdebug("adding event %d\n", type);
+	ffs->ev.types[ffs->ev.count++] = type;
+	wake_up_locked(&ffs->ev.waitq);
+}
+
+static void ffs_event_add(struct ffs_data *ffs,
+			  enum usb_functionfs_event_type type)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
+	__ffs_event_add(ffs, type);
+	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+}
+
+
+/* Bind/unbind USB function hooks *******************************************/
+
+static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
+				    struct usb_descriptor_header *desc,
+				    void *priv)
+{
+	struct usb_endpoint_descriptor *ds = (void *)desc;
+	struct ffs_function *func = priv;
+	struct ffs_ep *ffs_ep;
+
+	/*
+	 * If hs_descriptors is not NULL then we are reading hs
+	 * descriptors now
+	 */
+	const int isHS = func->function.hs_descriptors != NULL;
+	unsigned idx;
+
+	if (type != FFS_DESCRIPTOR)
+		return 0;
+
+	if (isHS)
+		func->function.hs_descriptors[(long)valuep] = desc;
+	else
+		func->function.descriptors[(long)valuep]    = desc;
+
+	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
+		return 0;
+
+	idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
+	ffs_ep = func->eps + idx;
+
+	if (unlikely(ffs_ep->descs[isHS])) {
+		pr_vdebug("two %sspeed descriptors for EP %d\n",
+			  isHS ? "high" : "full",
+			  ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+		return -EINVAL;
+	}
+	ffs_ep->descs[isHS] = ds;
+
+	ffs_dump_mem(": Original  ep desc", ds, ds->bLength);
+	if (ffs_ep->ep) {
+		ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
+		if (!ds->wMaxPacketSize)
+			ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
+	} else {
+		struct usb_request *req;
+		struct usb_ep *ep;
+
+		pr_vdebug("autoconfig\n");
+		ep = usb_ep_autoconfig(func->gadget, ds);
+		if (unlikely(!ep))
+			return -ENOTSUPP;
+		ep->driver_data = func->eps + idx;
+
+		req = usb_ep_alloc_request(ep, GFP_KERNEL);
+		if (unlikely(!req))
+			return -ENOMEM;
+
+		ffs_ep->ep  = ep;
+		ffs_ep->req = req;
+		func->eps_revmap[ds->bEndpointAddress &
+				 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
+	}
+	ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
+
+	return 0;
+}
+
+static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
+				   struct usb_descriptor_header *desc,
+				   void *priv)
+{
+	struct ffs_function *func = priv;
+	unsigned idx;
+	u8 newValue;
+
+	switch (type) {
+	default:
+	case FFS_DESCRIPTOR:
+		/* Handled in previous pass by __ffs_func_bind_do_descs() */
+		return 0;
+
+	case FFS_INTERFACE:
+		idx = *valuep;
+		if (func->interfaces_nums[idx] < 0) {
+			int id = usb_interface_id(func->conf, &func->function);
+			if (unlikely(id < 0))
+				return id;
+			func->interfaces_nums[idx] = id;
+		}
+		newValue = func->interfaces_nums[idx];
+		break;
+
+	case FFS_STRING:
+		/* String IDs are allocated when ffs_data is bound to cdev */
+		newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
+		break;
+
+	case FFS_ENDPOINT:
+		/*
+		 * USB_DT_ENDPOINT are handled in
+		 * __ffs_func_bind_do_descs().
+		 */
+		if (desc->bDescriptorType == USB_DT_ENDPOINT)
+			return 0;
+
+		idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
+		if (unlikely(!func->eps[idx].ep))
+			return -EINVAL;
+
+		{
+			struct usb_endpoint_descriptor **descs;
+			descs = func->eps[idx].descs;
+			newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
+		}
+		break;
+	}
+
+	pr_vdebug("%02x -> %02x\n", *valuep, newValue);
+	*valuep = newValue;
+	return 0;
+}
+
+static int ffs_func_bind(struct usb_configuration *c,
+			 struct usb_function *f)
+{
+	struct ffs_function *func = ffs_func_from_usb(f);
+	struct ffs_data *ffs = func->ffs;
+
+	const int full = !!func->ffs->fs_descs_count;
+	const int high = gadget_is_dualspeed(func->gadget) &&
+		func->ffs->hs_descs_count;
+
+	int ret;
+
+	/* Make it a single chunk, less management later on */
+	struct {
+		struct ffs_ep eps[ffs->eps_count];
+		struct usb_descriptor_header
+			*fs_descs[full ? ffs->fs_descs_count + 1 : 0];
+		struct usb_descriptor_header
+			*hs_descs[high ? ffs->hs_descs_count + 1 : 0];
+		short inums[ffs->interfaces_count];
+		char raw_descs[high ? ffs->raw_descs_length
+				    : ffs->raw_fs_descs_length];
+	} *data;
+
+	ENTER();
+
+	/* Only high speed but not supported by gadget? */
+	if (unlikely(!(full | high)))
+		return -ENOTSUPP;
+
+	/* Allocate */
+	data = kmalloc(sizeof *data, GFP_KERNEL);
+	if (unlikely(!data))
+		return -ENOMEM;
+
+	/* Zero */
+	memset(data->eps, 0, sizeof data->eps);
+	memcpy(data->raw_descs, ffs->raw_descs + 16, sizeof data->raw_descs);
+	memset(data->inums, 0xff, sizeof data->inums);
+	for (ret = ffs->eps_count; ret; --ret)
+		data->eps[ret - 1].num = -1;
+
+	/* Save pointers */
+	func->eps             = data->eps;
+	func->interfaces_nums = data->inums;
+
+	/*
+	 * Go through all the endpoint descriptors and allocate
+	 * endpoints first, so that later we can rewrite the endpoint
+	 * numbers without worrying that it may be described later on.
+	 */
+	if (likely(full)) {
+		func->function.descriptors = data->fs_descs;
+		ret = ffs_do_descs(ffs->fs_descs_count,
+				   data->raw_descs,
+				   sizeof data->raw_descs,
+				   __ffs_func_bind_do_descs, func);
+		if (unlikely(ret < 0))
+			goto error;
+	} else {
+		ret = 0;
+	}
+
+	if (likely(high)) {
+		func->function.hs_descriptors = data->hs_descs;
+		ret = ffs_do_descs(ffs->hs_descs_count,
+				   data->raw_descs + ret,
+				   (sizeof data->raw_descs) - ret,
+				   __ffs_func_bind_do_descs, func);
+	}
+
+	/*
+	 * Now handle interface numbers allocation and interface and
+	 * endpoint numbers rewriting.  We can do that in one go
+	 * now.
+	 */
+	ret = ffs_do_descs(ffs->fs_descs_count +
+			   (high ? ffs->hs_descs_count : 0),
+			   data->raw_descs, sizeof data->raw_descs,
+			   __ffs_func_bind_do_nums, func);
+	if (unlikely(ret < 0))
+		goto error;
+
+	/* And we're done */
+	ffs_event_add(ffs, FUNCTIONFS_BIND);
+	return 0;
+
+error:
+	/* XXX Do we need to release all claimed endpoints here? */
+	return ret;
+}
+
+
+/* Other USB function hooks *************************************************/
+
+static void ffs_func_unbind(struct usb_configuration *c,
+			    struct usb_function *f)
+{
+	struct ffs_function *func = ffs_func_from_usb(f);
+	struct ffs_data *ffs = func->ffs;
+
+	ENTER();
+
+	if (ffs->func == func) {
+		ffs_func_eps_disable(func);
+		ffs->func = NULL;
+	}
+
+	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+
+	ffs_func_free(func);
+}
+
+static int ffs_func_set_alt(struct usb_function *f,
+			    unsigned interface, unsigned alt)
+{
+	struct ffs_function *func = ffs_func_from_usb(f);
+	struct ffs_data *ffs = func->ffs;
+	int ret = 0, intf;
+
+	if (alt != (unsigned)-1) {
+		intf = ffs_func_revmap_intf(func, interface);
+		if (unlikely(intf < 0))
+			return intf;
+	}
+
+	if (ffs->func)
+		ffs_func_eps_disable(ffs->func);
+
+	if (ffs->state != FFS_ACTIVE)
+		return -ENODEV;
+
+	if (alt == (unsigned)-1) {
+		ffs->func = NULL;
+		ffs_event_add(ffs, FUNCTIONFS_DISABLE);
+		return 0;
+	}
+
+	ffs->func = func;
+	ret = ffs_func_eps_enable(func);
+	if (likely(ret >= 0))
+		ffs_event_add(ffs, FUNCTIONFS_ENABLE);
+	return ret;
+}
+
+static void ffs_func_disable(struct usb_function *f)
+{
+	ffs_func_set_alt(f, 0, (unsigned)-1);
+}
+
+static int ffs_func_setup(struct usb_function *f,
+			  const struct usb_ctrlrequest *creq)
+{
+	struct ffs_function *func = ffs_func_from_usb(f);
+	struct ffs_data *ffs = func->ffs;
+	unsigned long flags;
+	int ret;
+
+	ENTER();
+
+	pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
+	pr_vdebug("creq->bRequest     = %02x\n", creq->bRequest);
+	pr_vdebug("creq->wValue       = %04x\n", le16_to_cpu(creq->wValue));
+	pr_vdebug("creq->wIndex       = %04x\n", le16_to_cpu(creq->wIndex));
+	pr_vdebug("creq->wLength      = %04x\n", le16_to_cpu(creq->wLength));
+
+	/*
+	 * Most requests directed to an interface go through here
+	 * (notable exceptions are set/get interface), so we need to
+	 * handle them.  All others are either handled by composite or
+	 * passed to usb_configuration->setup() (if one is set).  We
+	 * also handle requests directed to an endpoint here, as that
+	 * is straightforward, but what to do with any other request?
+	 */
+	if (ffs->state != FFS_ACTIVE)
+		return -ENODEV;
+
+	switch (creq->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_INTERFACE:
+		ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
+		if (unlikely(ret < 0))
+			return ret;
+		break;
+
+	case USB_RECIP_ENDPOINT:
+		ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
+		if (unlikely(ret < 0))
+			return ret;
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
+	ffs->ev.setup = *creq;
+	ffs->ev.setup.wIndex = cpu_to_le16(ret);
+	__ffs_event_add(ffs, FUNCTIONFS_SETUP);
+	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+
+	return 0;
+}
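+
+/*
+ * Userspace sketch (not part of this driver): a FunctionFS daemon sees the
+ * FUNCTIONFS_SETUP event queued above by reading its ep0 file.  This assumes
+ * the usual struct usb_functionfs_event layout from the functionfs UAPI
+ * header; handle_setup() is a made-up helper and error handling is omitted.
+ *
+ *	struct usb_functionfs_event event;
+ *
+ *	if (read(ep0_fd, &event, sizeof(event)) == sizeof(event) &&
+ *	    event.type == FUNCTIONFS_SETUP)
+ *		handle_setup(&event.u.setup);	// answer via ep0 read/write
+ */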
+
+static void ffs_func_suspend(struct usb_function *f)
+{
+	ENTER();
+	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
+}
+
+static void ffs_func_resume(struct usb_function *f)
+{
+	ENTER();
+	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
+}
+
+
+/* Endpoint and interface numbers reverse mapping ***************************/
+
+static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
+{
+	num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
+	return num ? num : -EDOM;
+}
+
+static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
+{
+	short *nums = func->interfaces_nums;
+	unsigned count = func->ffs->interfaces_count;
+
+	for (; count; --count, ++nums) {
+		if (*nums >= 0 && *nums == intf)
+			return nums - func->interfaces_nums;
+	}
+
+	return -EDOM;
+}
+
+
+/* Misc helper functions ****************************************************/
+
+static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
+{
+	return nonblock
+		? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
+		: mutex_lock_interruptible(mutex);
+}
+
+static char *ffs_prepare_buffer(const char * __user buf, size_t len)
+{
+	char *data;
+
+	if (unlikely(!len))
+		return NULL;
+
+	data = kmalloc(len, GFP_KERNEL);
+	if (unlikely(!data))
+		return ERR_PTR(-ENOMEM);
+
+	if (unlikely(__copy_from_user(data, buf, len))) {
+		kfree(data);
+		return ERR_PTR(-EFAULT);
+	}
+
+	pr_vdebug("Buffer from user space:\n");
+	ffs_dump_mem("", data, len);
+
+	return data;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_hid.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_hid.c
new file mode 100644
index 0000000..b211342
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_hid.c
@@ -0,0 +1,659 @@
+/*
+ * f_hid.c -- USB HID function driver
+ *
+ * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/module.h>
+#include <linux/hid.h>
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/usb/g_hid.h>
+
+static int major, minors;
+static struct class *hidg_class;
+
+/*-------------------------------------------------------------------------*/
+/*                            HID gadget struct                            */
+
+struct f_hidg {
+	/* configuration */
+	unsigned char			bInterfaceSubClass;
+	unsigned char			bInterfaceProtocol;
+	unsigned short			report_desc_length;
+	char				*report_desc;
+	unsigned short			report_length;
+
+	/* recv report */
+	char				*set_report_buff;
+	unsigned short			set_report_length;
+	spinlock_t			spinlock;
+	wait_queue_head_t		read_queue;
+
+	/* send report */
+	struct mutex			lock;
+	bool				write_pending;
+	wait_queue_head_t		write_queue;
+	struct usb_request		*req;
+
+	int				minor;
+	struct cdev			cdev;
+	struct usb_function		func;
+	struct usb_ep			*in_ep;
+};
+
+static inline struct f_hidg *func_to_hidg(struct usb_function *f)
+{
+	return container_of(f, struct f_hidg, func);
+}
+
+/*-------------------------------------------------------------------------*/
+/*                           Static descriptors                            */
+
+static struct usb_interface_descriptor hidg_interface_desc = {
+	.bLength		= sizeof hidg_interface_desc,
+	.bDescriptorType	= USB_DT_INTERFACE,
+	/* .bInterfaceNumber	= DYNAMIC */
+	.bAlternateSetting	= 0,
+	.bNumEndpoints		= 1,
+	.bInterfaceClass	= USB_CLASS_HID,
+	/* .bInterfaceSubClass	= DYNAMIC */
+	/* .bInterfaceProtocol	= DYNAMIC */
+	/* .iInterface		= DYNAMIC */
+};
+
+static struct hid_descriptor hidg_desc = {
+	.bLength			= sizeof hidg_desc,
+	.bDescriptorType		= HID_DT_HID,
+	.bcdHID				= 0x0101,
+	.bCountryCode			= 0x00,
+	.bNumDescriptors		= 0x1,
+	/*.desc[0].bDescriptorType	= DYNAMIC */
+	/*.desc[0].wDescriptorLength	= DYNAMIC */
+};
+
+/* High-Speed Support */
+
+static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+	.bEndpointAddress	= USB_DIR_IN,
+	.bmAttributes		= USB_ENDPOINT_XFER_INT,
+	/*.wMaxPacketSize	= DYNAMIC */
+	.bInterval		= 4, /* FIXME: Add this field in the
+				      * HID gadget configuration?
+				      * (struct hidg_func_descriptor)
+				      */
+};
+
+static struct usb_descriptor_header *hidg_hs_descriptors[] = {
+	(struct usb_descriptor_header *)&hidg_interface_desc,
+	(struct usb_descriptor_header *)&hidg_desc,
+	(struct usb_descriptor_header *)&hidg_hs_in_ep_desc,
+	NULL,
+};
+
+/* Full-Speed Support */
+
+static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+	.bEndpointAddress	= USB_DIR_IN,
+	.bmAttributes		= USB_ENDPOINT_XFER_INT,
+	/*.wMaxPacketSize	= DYNAMIC */
+	.bInterval		= 10, /* FIXME: Add this field in the
+				       * HID gadget configuration?
+				       * (struct hidg_func_descriptor)
+				       */
+};
+
+static struct usb_descriptor_header *hidg_fs_descriptors[] = {
+	(struct usb_descriptor_header *)&hidg_interface_desc,
+	(struct usb_descriptor_header *)&hidg_desc,
+	(struct usb_descriptor_header *)&hidg_fs_in_ep_desc,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+/*                              Char Device                                */
+
+static ssize_t f_hidg_read(struct file *file, char __user *buffer,
+			size_t count, loff_t *ptr)
+{
+	struct f_hidg	*hidg     = file->private_data;
+	char		*tmp_buff = NULL;
+	unsigned long	flags;
+
+	if (!count)
+		return 0;
+
+	if (!access_ok(VERIFY_WRITE, buffer, count))
+		return -EFAULT;
+
+	spin_lock_irqsave(&hidg->spinlock, flags);
+
+#define READ_COND (hidg->set_report_buff != NULL)
+
+	while (!READ_COND) {
+		spin_unlock_irqrestore(&hidg->spinlock, flags);
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		if (wait_event_interruptible(hidg->read_queue, READ_COND))
+			return -ERESTARTSYS;
+
+		spin_lock_irqsave(&hidg->spinlock, flags);
+	}
+
+
+	count = min_t(unsigned, count, hidg->set_report_length);
+	tmp_buff = hidg->set_report_buff;
+	hidg->set_report_buff = NULL;
+
+	spin_unlock_irqrestore(&hidg->spinlock, flags);
+
+	if (tmp_buff != NULL) {
+		/* copy to user outside spinlock */
+		count -= copy_to_user(buffer, tmp_buff, count);
+		kfree(tmp_buff);
+	} else
+		count = -ENOMEM;
+
+	return count;
+}
+
+static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_hidg *hidg = (struct f_hidg *)ep->driver_data;
+
+	if (req->status != 0) {
+		ERROR(hidg->func.config->cdev,
+			"End Point Request ERROR: %d\n", req->status);
+	}
+
+	hidg->write_pending = 0;
+	wake_up(&hidg->write_queue);
+}
+
+static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
+			    size_t count, loff_t *offp)
+{
+	struct f_hidg *hidg  = file->private_data;
+	ssize_t status = -ENOMEM;
+
+	if (!access_ok(VERIFY_READ, buffer, count))
+		return -EFAULT;
+
+	mutex_lock(&hidg->lock);
+
+#define WRITE_COND (!hidg->write_pending)
+
+	/* write queue */
+	while (!WRITE_COND) {
+		mutex_unlock(&hidg->lock);
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		if (wait_event_interruptible_exclusive(
+				hidg->write_queue, WRITE_COND))
+			return -ERESTARTSYS;
+
+		mutex_lock(&hidg->lock);
+	}
+
+	count  = min_t(unsigned, count, hidg->report_length);
+	status = copy_from_user(hidg->req->buf, buffer, count);
+
+	if (status != 0) {
+		ERROR(hidg->func.config->cdev,
+			"copy_from_user error\n");
+		mutex_unlock(&hidg->lock);
+		return -EINVAL;
+	}
+
+	hidg->req->status   = 0;
+	hidg->req->zero     = 0;
+	hidg->req->length   = count;
+	hidg->req->complete = f_hidg_req_complete;
+	hidg->req->context  = hidg;
+	hidg->write_pending = 1;
+
+	status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
+	if (status < 0) {
+		ERROR(hidg->func.config->cdev,
+			"usb_ep_queue error on int endpoint %zd\n", status);
+		hidg->write_pending = 0;
+		wake_up(&hidg->write_queue);
+	} else {
+		status = count;
+	}
+
+	mutex_unlock(&hidg->lock);
+
+	return status;
+}
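+
+/*
+ * Userspace sketch (not part of this driver): sending one report through the
+ * character device registered below.  The node name follows the "hidg%d"
+ * naming used in hidg_bind(); the 8-byte keyboard-style report is only an
+ * example -- the real size is whatever report_length the gadget was
+ * configured with.
+ *
+ *	char report[8] = { 0 };
+ *	int fd = open("/dev/hidg0", O_RDWR);
+ *
+ *	report[2] = 0x04;			// usage ID of the 'a' key
+ *	write(fd, report, sizeof(report));	// key press
+ *	memset(report, 0, sizeof(report));
+ *	write(fd, report, sizeof(report));	// key release
+ */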
+
+static unsigned int f_hidg_poll(struct file *file, poll_table *wait)
+{
+	struct f_hidg	*hidg  = file->private_data;
+	unsigned int	ret = 0;
+
+	poll_wait(file, &hidg->read_queue, wait);
+	poll_wait(file, &hidg->write_queue, wait);
+
+	if (WRITE_COND)
+		ret |= POLLOUT | POLLWRNORM;
+
+	if (READ_COND)
+		ret |= POLLIN | POLLRDNORM;
+
+	return ret;
+}
+
+#undef WRITE_COND
+#undef READ_COND
+
+static int f_hidg_release(struct inode *inode, struct file *fd)
+{
+	fd->private_data = NULL;
+	return 0;
+}
+
+static int f_hidg_open(struct inode *inode, struct file *fd)
+{
+	struct f_hidg *hidg =
+		container_of(inode->i_cdev, struct f_hidg, cdev);
+
+	fd->private_data = hidg;
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+/*                                usb_function                             */
+
+static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_hidg *hidg = (struct f_hidg *)req->context;
+
+	if (req->status != 0 || req->buf == NULL || req->actual == 0) {
+		ERROR(hidg->func.config->cdev, "%s FAILED\n", __func__);
+		return;
+	}
+
+	spin_lock(&hidg->spinlock);
+
+	hidg->set_report_buff = krealloc(hidg->set_report_buff,
+					 req->actual, GFP_ATOMIC);
+
+	if (hidg->set_report_buff == NULL) {
+		spin_unlock(&hidg->spinlock);
+		return;
+	}
+	hidg->set_report_length = req->actual;
+	memcpy(hidg->set_report_buff, req->buf, req->actual);
+
+	spin_unlock(&hidg->spinlock);
+
+	wake_up(&hidg->read_queue);
+}
+
+static int hidg_setup(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct f_hidg			*hidg = func_to_hidg(f);
+	struct usb_composite_dev	*cdev = f->config->cdev;
+	struct usb_request		*req  = cdev->req;
+	int status = 0;
+	__u16 value, length;
+
+	value	= __le16_to_cpu(ctrl->wValue);
+	length	= __le16_to_cpu(ctrl->wLength);
+
+	VDBG(cdev, "hid_setup ctrl_request : bRequestType:0x%x bRequest:0x%x "
+		"Value:0x%x\n", ctrl->bRequestType, ctrl->bRequest, value);
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+		  | HID_REQ_GET_REPORT):
+		VDBG(cdev, "get_report\n");
+
+		/* send an empty report */
+		length = min_t(unsigned, length, hidg->report_length);
+		memset(req->buf, 0x0, length);
+
+		goto respond;
+		break;
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+		  | HID_REQ_GET_PROTOCOL):
+		VDBG(cdev, "get_protocol\n");
+		goto stall;
+		break;
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+		  | HID_REQ_SET_REPORT):
+		VDBG(cdev, "set_report | wLength=%d\n", length);
+		req->context  = hidg;
+		req->complete = hidg_set_report_complete;
+		goto respond;
+		break;
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+		  | HID_REQ_SET_PROTOCOL):
+		VDBG(cdev, "set_protocol\n");
+		goto stall;
+		break;
+
+	case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
+		  | USB_REQ_GET_DESCRIPTOR):
+		switch (value >> 8) {
+		case HID_DT_HID:
+			VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
+			length = min_t(unsigned short, length,
+						   hidg_desc.bLength);
+			memcpy(req->buf, &hidg_desc, length);
+			goto respond;
+			break;
+		case HID_DT_REPORT:
+			VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n");
+			length = min_t(unsigned short, length,
+						   hidg->report_desc_length);
+			memcpy(req->buf, hidg->report_desc, length);
+			goto respond;
+			break;
+
+		default:
+			VDBG(cdev, "Unknown descriptor request 0x%x\n",
+				 value >> 8);
+			goto stall;
+			break;
+		}
+		break;
+
+	default:
+		VDBG(cdev, "Unknown request 0x%x\n",
+			 ctrl->bRequest);
+		goto stall;
+		break;
+	}
+
+stall:
+	return -EOPNOTSUPP;
+
+respond:
+	req->zero = 0;
+	req->length = length;
+	status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+	if (status < 0)
+		ERROR(cdev, "usb_ep_queue error on ep0 %d\n", value);
+	return status;
+}
+
+static void hidg_disable(struct usb_function *f)
+{
+	struct f_hidg *hidg = func_to_hidg(f);
+
+	usb_ep_disable(hidg->in_ep);
+	hidg->in_ep->driver_data = NULL;
+}
+
+static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct usb_composite_dev		*cdev = f->config->cdev;
+	struct f_hidg				*hidg = func_to_hidg(f);
+	int status = 0;
+
+	VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt);
+
+	if (hidg->in_ep != NULL) {
+		/* restart endpoint */
+		if (hidg->in_ep->driver_data != NULL)
+			usb_ep_disable(hidg->in_ep);
+
+		status = config_ep_by_speed(f->config->cdev->gadget, f,
+					    hidg->in_ep);
+		if (status) {
+			ERROR(cdev, "config_ep_by_speed FAILED!\n");
+			goto fail;
+		}
+		status = usb_ep_enable(hidg->in_ep);
+		if (status < 0) {
+			ERROR(cdev, "Enable endpoint FAILED!\n");
+			goto fail;
+		}
+		hidg->in_ep->driver_data = hidg;
+	}
+fail:
+	return status;
+}
+
+const struct file_operations f_hidg_fops = {
+	.owner		= THIS_MODULE,
+	.open		= f_hidg_open,
+	.release	= f_hidg_release,
+	.write		= f_hidg_write,
+	.read		= f_hidg_read,
+	.poll		= f_hidg_poll,
+	.llseek		= noop_llseek,
+};
+
+static int __init hidg_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_ep		*ep;
+	struct f_hidg		*hidg = func_to_hidg(f);
+	int			status;
+	dev_t			dev;
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	hidg_interface_desc.bInterfaceNumber = status;
+
+
+	/* allocate instance-specific endpoints */
+	status = -ENODEV;
+	ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_in_ep_desc);
+	if (!ep)
+		goto fail;
+	ep->driver_data = c->cdev;	/* claim */
+	hidg->in_ep = ep;
+
+	/* preallocate request and buffer */
+	status = -ENOMEM;
+	hidg->req = usb_ep_alloc_request(hidg->in_ep, GFP_KERNEL);
+	if (!hidg->req)
+		goto fail;
+
+
+	hidg->req->buf = kmalloc(hidg->report_length, GFP_KERNEL);
+	if (!hidg->req->buf)
+		goto fail;
+
+	/* set descriptor dynamic values */
+	hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
+	hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
+	hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+	hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+	hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
+	hidg_desc.desc[0].wDescriptorLength =
+		cpu_to_le16(hidg->report_desc_length);
+
+	hidg->set_report_buff = NULL;
+
+	/* copy descriptors */
+	f->descriptors = usb_copy_descriptors(hidg_fs_descriptors);
+	if (!f->descriptors)
+		goto fail;
+
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hidg_hs_in_ep_desc.bEndpointAddress =
+			hidg_fs_in_ep_desc.bEndpointAddress;
+		f->hs_descriptors = usb_copy_descriptors(hidg_hs_descriptors);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	mutex_init(&hidg->lock);
+	spin_lock_init(&hidg->spinlock);
+	init_waitqueue_head(&hidg->write_queue);
+	init_waitqueue_head(&hidg->read_queue);
+
+	/* create char device */
+	cdev_init(&hidg->cdev, &f_hidg_fops);
+	dev = MKDEV(major, hidg->minor);
+	status = cdev_add(&hidg->cdev, dev, 1);
+	if (status)
+		goto fail;
+
+	device_create(hidg_class, NULL, dev, NULL, "%s%d", "hidg", hidg->minor);
+
+	return 0;
+
+fail:
+	ERROR(f->config->cdev, "hidg_bind FAILED\n");
+	if (hidg->req != NULL) {
+		kfree(hidg->req->buf);
+		if (hidg->in_ep != NULL)
+			usb_ep_free_request(hidg->in_ep, hidg->req);
+	}
+
+	usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+
+	return status;
+}
+
+static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_hidg *hidg = func_to_hidg(f);
+
+	device_destroy(hidg_class, MKDEV(major, hidg->minor));
+	cdev_del(&hidg->cdev);
+
+	/* disable/free request and end point */
+	usb_ep_disable(hidg->in_ep);
+	usb_ep_dequeue(hidg->in_ep, hidg->req);
+	kfree(hidg->req->buf);
+	usb_ep_free_request(hidg->in_ep, hidg->req);
+
+	/* free descriptors copies */
+	usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+
+	kfree(hidg->report_desc);
+	kfree(hidg->set_report_buff);
+	kfree(hidg);
+}
+
+/*-------------------------------------------------------------------------*/
+/*                                 Strings                                 */
+
+#define CT_FUNC_HID_IDX	0
+
+static struct usb_string ct_func_string_defs[] = {
+	[CT_FUNC_HID_IDX].s	= "HID Interface",
+	{},			/* end of list */
+};
+
+static struct usb_gadget_strings ct_func_string_table = {
+	.language	= 0x0409,	/* en-US */
+	.strings	= ct_func_string_defs,
+};
+
+static struct usb_gadget_strings *ct_func_strings[] = {
+	&ct_func_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+/*                             usb_configuration                           */
+
+int __init hidg_bind_config(struct usb_configuration *c,
+			    struct hidg_func_descriptor *fdesc, int index)
+{
+	struct f_hidg *hidg;
+	int status;
+
+	if (index >= minors)
+		return -ENOENT;
+
+	/* maybe allocate device-global string IDs, and patch descriptors */
+	if (ct_func_string_defs[CT_FUNC_HID_IDX].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ct_func_string_defs[CT_FUNC_HID_IDX].id = status;
+		hidg_interface_desc.iInterface = status;
+	}
+
+	/* allocate and initialize one new instance */
+	hidg = kzalloc(sizeof *hidg, GFP_KERNEL);
+	if (!hidg)
+		return -ENOMEM;
+
+	hidg->minor = index;
+	hidg->bInterfaceSubClass = fdesc->subclass;
+	hidg->bInterfaceProtocol = fdesc->protocol;
+	hidg->report_length = fdesc->report_length;
+	hidg->report_desc_length = fdesc->report_desc_length;
+	hidg->report_desc = kmemdup(fdesc->report_desc,
+				    fdesc->report_desc_length,
+				    GFP_KERNEL);
+	if (!hidg->report_desc) {
+		kfree(hidg);
+		return -ENOMEM;
+	}
+
+	hidg->func.name    = "hid";
+	hidg->func.strings = ct_func_strings;
+	hidg->func.bind    = hidg_bind;
+	hidg->func.unbind  = hidg_unbind;
+	hidg->func.set_alt = hidg_set_alt;
+	hidg->func.disable = hidg_disable;
+	hidg->func.setup   = hidg_setup;
+
+	status = usb_add_function(c, &hidg->func);
+	if (status)
+		kfree(hidg);
+
+	return status;
+}
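+
+/*
+ * Sketch of a caller (illustrative, not compiled here): a gadget driver
+ * fills in a struct hidg_func_descriptor -- the layout assumed below is the
+ * one from <linux/usb/g_hid.h> -- and hands it to hidg_bind_config().  The
+ * values shown describe a boot-protocol keyboard; the report descriptor
+ * bytes themselves are elided.
+ *
+ *	static struct hidg_func_descriptor keyboard_hid = {
+ *		.subclass		= 0,	// no subclass
+ *		.protocol		= 1,	// keyboard
+ *		.report_length		= 8,
+ *		.report_desc_length	= 63,
+ *		.report_desc		= { ... usual boot keyboard items ... },
+ *	};
+ *
+ *	status = hidg_bind_config(c, &keyboard_hid, 0);
+ */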
+
+int __init ghid_setup(struct usb_gadget *g, int count)
+{
+	int status;
+	dev_t dev;
+
+	hidg_class = class_create(THIS_MODULE, "hidg");
+
+	status = alloc_chrdev_region(&dev, 0, count, "hidg");
+	if (!status) {
+		major = MAJOR(dev);
+		minors = count;
+	}
+
+	return status;
+}
+
+void ghid_cleanup(void)
+{
+	if (major) {
+		unregister_chrdev_region(MKDEV(major, 0), minors);
+		major = minors = 0;
+	}
+
+	class_destroy(hidg_class);
+	hidg_class = NULL;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_loopback.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_loopback.c
new file mode 100644
index 0000000..2c0cd82
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_loopback.c
@@ -0,0 +1,430 @@
+/*
+ * f_loopback.c - USB peripheral loopback configuration driver
+ *
+ * Copyright (C) 2003-2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+#include "g_zero.h"
+#include "gadget_chips.h"
+
+
+/*
+ * LOOPBACK FUNCTION ... a testing vehicle for USB peripherals,
+ *
+ * This takes messages of various sizes written OUT to a device, and loops
+ * them back so they can be read IN from it.  It has been used by certain
+ * test applications.  It supports limited testing of data queueing logic.
+ *
+ *
+ * This is currently packaged as a configuration driver, which can't be
+ * combined with other functions to make composite devices.  However, it
+ * can be combined with other independent configurations.
+ */
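+
+/*
+ * Host-side sketch (illustrative only, using libusb; "h" is an open device
+ * handle and the endpoint addresses are whatever the gadget's descriptors
+ * report): whatever is written to the OUT endpoint can be read straight
+ * back from the IN endpoint.
+ *
+ *	libusb_bulk_transfer(h, out_ep, buf, len, &done, 1000);
+ *	libusb_bulk_transfer(h, in_ep, buf, len, &done, 1000);
+ */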
+struct f_loopback {
+	struct usb_function	function;
+
+	struct usb_ep		*in_ep;
+	struct usb_ep		*out_ep;
+};
+
+static inline struct f_loopback *func_to_loop(struct usb_function *f)
+{
+	return container_of(f, struct f_loopback, function);
+}
+
+static unsigned qlen = 32;
+module_param(qlen, uint, 0);
+MODULE_PARM_DESC(qlen, "depth of loopback queue");
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_interface_descriptor loopback_intf = {
+	.bLength =		sizeof loopback_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor fs_loop_source_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_loop_sink_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_loopback_descs[] = {
+	(struct usb_descriptor_header *) &loopback_intf,
+	(struct usb_descriptor_header *) &fs_loop_sink_desc,
+	(struct usb_descriptor_header *) &fs_loop_source_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor hs_loop_source_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_loop_sink_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *hs_loopback_descs[] = {
+	(struct usb_descriptor_header *) &loopback_intf,
+	(struct usb_descriptor_header *) &hs_loop_source_desc,
+	(struct usb_descriptor_header *) &hs_loop_sink_desc,
+	NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_loop_source_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+struct usb_ss_ep_comp_descriptor ss_loop_source_comp_desc = {
+	.bLength =		USB_DT_SS_EP_COMP_SIZE,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst =		0,
+	.bmAttributes =		0,
+	.wBytesPerInterval =	0,
+};
+
+static struct usb_endpoint_descriptor ss_loop_sink_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+struct usb_ss_ep_comp_descriptor ss_loop_sink_comp_desc = {
+	.bLength =		USB_DT_SS_EP_COMP_SIZE,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst =		0,
+	.bmAttributes =		0,
+	.wBytesPerInterval =	0,
+};
+
+static struct usb_descriptor_header *ss_loopback_descs[] = {
+	(struct usb_descriptor_header *) &loopback_intf,
+	(struct usb_descriptor_header *) &ss_loop_source_desc,
+	(struct usb_descriptor_header *) &ss_loop_source_comp_desc,
+	(struct usb_descriptor_header *) &ss_loop_sink_desc,
+	(struct usb_descriptor_header *) &ss_loop_sink_comp_desc,
+	NULL,
+};
+
+/* function-specific strings: */
+
+static struct usb_string strings_loopback[] = {
+	[0].s = "loop input to output",
+	{  }			/* end of list */
+};
+
+static struct usb_gadget_strings stringtab_loop = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_loopback,
+};
+
+static struct usb_gadget_strings *loopback_strings[] = {
+	&stringtab_loop,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init
+loopback_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_loopback	*loop = func_to_loop(f);
+	int			id;
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	loopback_intf.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+
+	loop->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_loop_source_desc);
+	if (!loop->in_ep) {
+autoconf_fail:
+		ERROR(cdev, "%s: can't autoconfigure on %s\n",
+			f->name, cdev->gadget->name);
+		return -ENODEV;
+	}
+	loop->in_ep->driver_data = cdev;	/* claim */
+
+	loop->out_ep = usb_ep_autoconfig(cdev->gadget, &fs_loop_sink_desc);
+	if (!loop->out_ep)
+		goto autoconf_fail;
+	loop->out_ep->driver_data = cdev;	/* claim */
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hs_loop_source_desc.bEndpointAddress =
+				fs_loop_source_desc.bEndpointAddress;
+		hs_loop_sink_desc.bEndpointAddress =
+				fs_loop_sink_desc.bEndpointAddress;
+		f->hs_descriptors = hs_loopback_descs;
+	}
+
+	/* support super speed hardware */
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		ss_loop_source_desc.bEndpointAddress =
+				fs_loop_source_desc.bEndpointAddress;
+		ss_loop_sink_desc.bEndpointAddress =
+				fs_loop_sink_desc.bEndpointAddress;
+		f->ss_descriptors = ss_loopback_descs;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+	    (gadget_is_superspeed(c->cdev->gadget) ? "super" :
+	     (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
+			f->name, loop->in_ep->name, loop->out_ep->name);
+	return 0;
+}
+
+static void
+loopback_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	kfree(func_to_loop(f));
+}
+
+static void loopback_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_loopback	*loop = ep->driver_data;
+	struct usb_composite_dev *cdev = loop->function.config->cdev;
+	int			status = req->status;
+
+	switch (status) {
+
+	case 0:				/* normal completion? */
+		if (ep == loop->out_ep) {
+			/* loop this OUT packet back IN to the host */
+			req->zero = (req->actual < req->length);
+			req->length = req->actual;
+			status = usb_ep_queue(loop->in_ep, req, GFP_ATOMIC);
+			if (status == 0)
+				return;
+
+			/* "should never get here" */
+			ERROR(cdev, "can't loop %s to %s: %d\n",
+				ep->name, loop->in_ep->name,
+				status);
+		}
+
+		/* queue the buffer for some later OUT packet */
+		req->length = buflen;
+		status = usb_ep_queue(loop->out_ep, req, GFP_ATOMIC);
+		if (status == 0)
+			return;
+
+		/* "should never get here" */
+		/* FALLTHROUGH */
+
+	default:
+		ERROR(cdev, "%s loop complete --> %d, %d/%d\n", ep->name,
+				status, req->actual, req->length);
+		/* FALLTHROUGH */
+
+	/* NOTE:  since this driver doesn't maintain an explicit record
+	 * of requests it submitted (just maintains qlen count), we
+	 * rely on the hardware driver to clean up on disconnect or
+	 * endpoint disable.
+	 */
+	case -ECONNABORTED:		/* hardware forced ep reset */
+	case -ECONNRESET:		/* request dequeued */
+	case -ESHUTDOWN:		/* disconnect from host */
+		free_ep_req(ep, req);
+		return;
+	}
+}
+
+static void disable_loopback(struct f_loopback *loop)
+{
+	struct usb_composite_dev	*cdev;
+
+	cdev = loop->function.config->cdev;
+	disable_endpoints(cdev, loop->in_ep, loop->out_ep);
+	VDBG(cdev, "%s disabled\n", loop->function.name);
+}
+
+static int
+enable_loopback(struct usb_composite_dev *cdev, struct f_loopback *loop)
+{
+	int					result = 0;
+	struct usb_ep				*ep;
+	struct usb_request			*req;
+	unsigned				i;
+
+	/* one endpoint writes data back IN to the host */
+	ep = loop->in_ep;
+	result = config_ep_by_speed(cdev->gadget, &(loop->function), ep);
+	if (result)
+		return result;
+	result = usb_ep_enable(ep);
+	if (result < 0)
+		return result;
+	ep->driver_data = loop;
+
+	/* one endpoint just reads OUT packets */
+	ep = loop->out_ep;
+	result = config_ep_by_speed(cdev->gadget, &(loop->function), ep);
+	if (result)
+		goto fail0;
+
+	result = usb_ep_enable(ep);
+	if (result < 0) {
+fail0:
+		ep = loop->in_ep;
+		usb_ep_disable(ep);
+		ep->driver_data = NULL;
+		return result;
+	}
+	ep->driver_data = loop;
+
+	/* allocate a bunch of read buffers and queue them all at once.
+	 * we buffer at most 'qlen' transfers; fewer if any need more
+	 * than 'buflen' bytes each.
+	 */
+	for (i = 0; i < qlen && result == 0; i++) {
+		req = alloc_ep_req(ep);
+		if (req) {
+			req->complete = loopback_complete;
+			result = usb_ep_queue(ep, req, GFP_ATOMIC);
+			if (result)
+				ERROR(cdev, "%s queue req --> %d\n",
+						ep->name, result);
+		} else {
+			usb_ep_disable(ep);
+			ep->driver_data = NULL;
+			result = -ENOMEM;
+			goto fail0;
+		}
+	}
+
+	DBG(cdev, "%s enabled\n", loop->function.name);
+	return result;
+}
+
+static int loopback_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct f_loopback	*loop = func_to_loop(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	/* we know alt is zero */
+	if (loop->in_ep->driver_data)
+		disable_loopback(loop);
+	return enable_loopback(cdev, loop);
+}
+
+static void loopback_disable(struct usb_function *f)
+{
+	struct f_loopback	*loop = func_to_loop(f);
+
+	disable_loopback(loop);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int __init loopback_bind_config(struct usb_configuration *c)
+{
+	struct f_loopback	*loop;
+	int			status;
+
+	loop = kzalloc(sizeof *loop, GFP_KERNEL);
+	if (!loop)
+		return -ENOMEM;
+
+	loop->function.name = "loopback";
+	loop->function.descriptors = fs_loopback_descs;
+	loop->function.bind = loopback_bind;
+	loop->function.unbind = loopback_unbind;
+	loop->function.set_alt = loopback_set_alt;
+	loop->function.disable = loopback_disable;
+
+	status = usb_add_function(c, &loop->function);
+	if (status)
+		kfree(loop);
+	return status;
+}
+
+static struct usb_configuration loopback_driver = {
+	.label		= "loopback",
+	.strings	= loopback_strings,
+	.bConfigurationValue = 2,
+	.bmAttributes	= USB_CONFIG_ATT_SELFPOWER,
+	/* .iConfiguration = DYNAMIC */
+};
+
+/**
+ * loopback_add - add a loopback testing configuration to a device
+ * @cdev: the device to support the loopback configuration
+ */
+int __init loopback_add(struct usb_composite_dev *cdev, bool autoresume)
+{
+	int id;
+
+	/* allocate string ID(s) */
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_loopback[0].id = id;
+
+	loopback_intf.iInterface = id;
+	loopback_driver.iConfiguration = id;
+
+	/* support autoresume for remote wakeup testing */
+	if (autoresume)
+		loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+
+	/* support OTG systems */
+	if (gadget_is_otg(cdev->gadget)) {
+		loopback_driver.descriptors = otg_desc;
+		loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	return usb_add_config(cdev, &loopback_driver, loopback_bind_config);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_mass_storage.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_mass_storage.c
new file mode 100644
index 0000000..c477f1a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_mass_storage.c
@@ -0,0 +1,3638 @@
+/*
+ * f_mass_storage.c -- Mass Storage USB Composite Function
+ *
+ * Copyright (C) 2003-2008 Alan Stern
+ * Copyright (C) 2009 Samsung Electronics
+ *                    Author: Michal Nazarewicz <mina86@mina86.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * The Mass Storage Function acts as a USB Mass Storage device,
+ * appearing to the host as a disk drive or as a CD-ROM drive.  In
+ * addition to providing an example of a genuinely useful composite
+ * function for a USB device, it also illustrates a technique of
+ * double-buffering for increased throughput.
+ *
+ * Function supports multiple logical units (LUNs).  Backing storage
+ * for each LUN is provided by a regular file or a block device.
+ * Access for each LUN can be limited to read-only.  Moreover, the
+ * function can indicate that a LUN is removable and/or a CD-ROM.  (The
+ * latter implies read-only access.)
+ *
+ * MSF is configured by specifying a fsg_config structure.  It has the
+ * following fields:
+ *
+ *	nluns		Number of LUNs the function has (anywhere from 1
+ *				to FSG_MAX_LUNS, which is 8).
+ *	luns		An array of LUN configuration values.  This
+ *				should be filled for each LUN that
+ *				the function will include (i.e. for "nluns"
+ *				LUNs).  Each element of the array has
+ *				the following fields:
+ *	->filename	The path to the backing file for the LUN.
+ *				Required if LUN is not marked as
+ *				removable.
+ *	->ro		Flag specifying that access to the LUN shall be
+ *				read-only.  This is implied if CD-ROM
+ *				emulation is enabled as well as when
+ *				it was impossible to open "filename"
+ *				in R/W mode.
+ *	->removable	Flag specifying that LUN shall be indicated as
+ *				being removable.
+ *	->cdrom		Flag specifying that LUN shall be reported as
+ *				being a CD-ROM.
+ *	->nofua		Flag specifying that FUA flag in SCSI WRITE(10,12)
+ *				commands for this LUN shall be ignored.
+ *
+ *	lun_name_format	A printf-like format for names of the LUN
+ *				devices.  This determines how the
+ *				directory in sysfs will be named.
+ *				Unless you are using several MSFs in
+ *				a single gadget (as opposed to a single
+ *				MSF in many configurations), you may
+ *				leave it as NULL (in which case
+ *				"lun%d" will be used).  In the format
+ *				you can use "%d" to index LUNs for
+ *				MSFs with more than one LUN.  (Beware
+ *				that only one integer is given as an
+ *				argument for the format, so specifying an
+ *				invalid format may cause
+ *				unspecified behaviour.)
+ *	thread_name	Name of the kernel thread process used by the
+ *				MSF.  You can safely set it to NULL
+ *				(in which case default "file-storage"
+ *				will be used).
+ *
+ *	vendor_name
+ *	product_name
+ *	release		Information used as a reply to INQUIRY
+ *				request.  To use the defaults, set these to
+ *				NULL, NULL and 0xffff respectively.  The first
+ *				field should be 8 and the second 16
+ *				characters or less.
+ *
+ *	can_stall	Set to permit the function to halt bulk endpoints.
+ *				Disabled on some USB devices known not
+ *				to work correctly.  You should set it
+ *				to true.
+ *
+ * If "removable" is not set for a LUN then a backing file must be
+ * specified.  If it is set, then a NULL filename means the LUN's medium
+ * is not loaded (an empty string as "filename" in the fsg_config
+ * structure causes an error).  The CD-ROM emulation includes a single
+ * data track and no audio tracks; hence there need be only one
+ * backing file per LUN.
+ *
+ *
+ * MSF includes support for module parameters.  If the gadget using it
+ * decides to use them, the following module parameters will be
+ * available:
+ *
+ *	file=filename[,filename...]
+ *			Names of the files or block devices used for
+ *				backing storage.
+ *	ro=b[,b...]	Default false, boolean for read-only access.
+ *	removable=b[,b...]
+ *			Default true, boolean for removable media.
+ *	cdrom=b[,b...]	Default false, boolean for whether to emulate
+ *				a CD-ROM drive.
+ *	nofua=b[,b...]	Default false, boolean for ignoring the FUA
+ *				flag in SCSI WRITE(10,12) commands.
+ *	luns=N		Default N = number of filenames, number of
+ *				LUNs to support.
+ *	stall		Default determined according to the type of
+ *				USB device controller (usually true),
+ *				boolean to permit the driver to halt
+ *				bulk endpoints.
+ *
+ * The module parameters may be prefixed with some string.  You need
+ * to consult the gadget's documentation or source to verify whether it
+ * is using those module parameters and, if it does, what the prefix is
+ * (look for FSG_MODULE_PARAMETERS() macro usage; what's inside it is
+ * the prefix).
+ *
+ *
+ * Requirements are modest; only a bulk-in and a bulk-out endpoint are
+ * needed.  The memory requirement amounts to two 16K buffers, size
+ * configurable by a parameter.  Support is included for both
+ * full-speed and high-speed operation.
+ *
+ * Note that the driver is slightly non-portable in that it assumes a
+ * single memory/DMA buffer will be usable for bulk-in, bulk-out, and
+ * interrupt-in endpoints.  With most device controllers this isn't an
+ * issue, but there may be some with hardware restrictions that prevent
+ * a buffer from being used by more than one endpoint.
+ *
+ *
+ * The pathnames of the backing files and the ro settings are
+ * available in the attribute files "file" and "ro" in the lun<n> (or
+ * to be more precise, in a directory whose name comes from the
+ * "lun_name_format" option!) subdirectory of the gadget's sysfs
+ * directory.  If the "removable" option is set, writing to these
+ * files will simulate ejecting/loading the medium (writing an empty
+ * line means eject) and adjusting a write-enable tab.  Changes to the
+ * ro setting are not allowed when the medium is loaded or if CD-ROM
+ * emulation is being used.
+ *
+ * When a LUN receives an "eject" SCSI request (Start/Stop Unit),
+ * if the LUN is removable, the backing file is released to simulate
+ * ejection.
+ *
+ *
+ * This function is heavily based on "File-backed Storage Gadget" by
+ * Alan Stern which in turn is heavily based on "Gadget Zero" by David
+ * Brownell.  The driver's SCSI command interface was based on the
+ * "Information technology - Small Computer System Interface - 2"
+ * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
+ * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
+ * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
+ * was based on the "Universal Serial Bus Mass Storage Class UFI
+ * Command Specification" document, Revision 1.0, December 14, 1998,
+ * available at
+ * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
+ */
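+
+/*
+ * Illustrative sketch (not compiled): a gadget wanting a single removable,
+ * read-only CD-ROM LUN might fill in the configuration roughly like this.
+ * The field names follow struct fsg_config as defined further down in this
+ * file; the backing filename and vendor/product strings are placeholders.
+ *
+ *	static struct fsg_config cfg = {
+ *		.nluns			= 1,
+ *		.luns[0] = {
+ *			.filename	= "/data/cdrom.iso",
+ *			.ro		= 1,
+ *			.removable	= 1,
+ *			.cdrom		= 1,
+ *		},
+ *		.vendor_name		= "Example",
+ *		.product_name		= "Mass Storage",
+ *		.release		= 0xffff,	// use default
+ *		.can_stall		= 1,
+ *	};
+ */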
+
+/*
+ *				Driver Design
+ *
+ * The MSF is fairly straightforward.  There is a main kernel
+ * thread that handles most of the work.  Interrupt routines field
+ * callbacks from the controller driver: bulk- and interrupt-request
+ * completion notifications, endpoint-0 events, and disconnect events.
+ * Completion events are passed to the main thread by wakeup calls.  Many
+ * ep0 requests are handled at interrupt time, but SetInterface,
+ * SetConfiguration, and device reset requests are forwarded to the
+ * thread in the form of "exceptions" using SIGUSR1 signals (since they
+ * should interrupt any ongoing file I/O operations).
+ *
+ * The thread's main routine implements the standard command/data/status
+ * parts of a SCSI interaction.  It and its subroutines are full of tests
+ * for pending signals/exceptions -- all this polling is necessary since
+ * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
+ * indication that the driver really wants to be running in userspace.)
+ * An important point is that so long as the thread is alive it keeps an
+ * open reference to the backing file.  This will prevent unmounting
+ * the backing file's underlying filesystem and could cause problems
+ * during system shutdown, for example.  To prevent such problems, the
+ * thread catches INT, TERM, and KILL signals and converts them into
+ * an EXIT exception.
+ *
+ * In normal operation the main thread is started during the gadget's
+ * fsg_bind() callback and stopped during fsg_unbind().  But it can
+ * also exit when it receives a signal, and there's no point leaving
+ * the gadget running when the thread is dead.  As of this moment, MSF
+ * provides no way to deregister the gadget when the thread dies -- maybe
+ * a callback function is needed.
+ *
+ * To provide maximum throughput, the driver uses a circular pipeline of
+ * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
+ * arbitrarily long; in practice the benefits don't justify having more
+ * than 2 stages (i.e., double buffering).  But it helps to think of the
+ * pipeline as being a long one.  Each buffer head contains a bulk-in and
+ * a bulk-out request pointer (since the buffer can be used for both
+ * output and input -- directions always are given from the host's
+ * point of view) as well as a pointer to the buffer and various state
+ * variables.
+ *
+ * Use of the pipeline follows a simple protocol.  There is a variable
+ * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
+ * At any time that buffer head may still be in use from an earlier
+ * request, so each buffer head has a state variable indicating whether
+ * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
+ * buffer head to be EMPTY, filling the buffer either by file I/O or by
+ * USB I/O (during which the buffer head is BUSY), and marking the buffer
+ * head FULL when the I/O is complete.  Then the buffer will be emptied
+ * (again possibly by USB I/O, during which it is marked BUSY) and
+ * finally marked EMPTY again (possibly by a completion routine).
+ *
+ * A module parameter tells the driver to avoid stalling the bulk
+ * endpoints wherever the transport specification allows.  This is
+ * necessary for some UDCs like the SuperH, which cannot reliably clear a
+ * halt on a bulk endpoint.  However, under certain circumstances the
+ * Bulk-only specification requires a stall.  In such cases the driver
+ * will halt the endpoint and set a flag indicating that it should clear
+ * the halt in software during the next device reset.  Hopefully this
+ * will permit everything to work correctly.  Furthermore, although the
+ * specification allows the bulk-out endpoint to halt when the host sends
+ * too much data, implementing this would cause an unavoidable race.
+ * The driver will always use the "no-stall" approach for OUT transfers.
+ *
+ * One subtle point concerns sending status-stage responses for ep0
+ * requests.  Some of these requests, such as device reset, can involve
+ * interrupting an ongoing file I/O operation, which might take an
+ * arbitrarily long time.  During that delay the host might give up on
+ * the original ep0 request and issue a new one.  When that happens the
+ * driver should not notify the host about completion of the original
+ * request, as the host will no longer be waiting for it.  So the driver
+ * assigns to each ep0 request a unique tag, and it keeps track of the
+ * tag value of the request associated with a long-running exception
+ * (device-reset, interface-change, or configuration-change).  When the
+ * exception handler is finished, the status-stage response is submitted
+ * only if the current ep0 request tag is equal to the exception request
+ * tag.  Thus only the most recently received ep0 request will get a
+ * status-stage response.
+ *
+ * Warning: This driver source file is too long.  It ought to be split up
+ * into a header file plus about 3 separate .c files, to handle the details
+ * of the Gadget, USB Mass Storage, and SCSI protocols.
+ */
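+
+/*
+ * Rough shape of the buffer-head cycle described above (illustrative
+ * pseudocode, not the real main loop; the BUF_STATE_* names are assumed to
+ * be the ones used by storage_common.c):
+ *
+ *	bh = common->next_buffhd_to_fill;
+ *	wait for bh->state == BUF_STATE_EMPTY;
+ *	start file or USB I/O into bh->buf; bh->state = BUF_STATE_BUSY;
+ *	the completion routine marks it BUF_STATE_FULL;
+ *	common->next_buffhd_to_drain empties it and marks it EMPTY again.
+ */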
+
+
+/* #define VERBOSE_DEBUG */
+/* #define DUMP_MSGS */
+#pragma GCC optimize("O0")
+
+
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/dcache.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/limits.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/freezer.h>
+#include <linux/utsname.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/composite.h>
+#include <linux/android_notify.h>
+
+#include "gadget_chips.h"
+#include <mach/highspeed_debug.h>
+
+#ifndef DEBUG
+#define DEBUG
+#endif
+#ifndef CONFIG_DYNAMIC_DEBUG
+#define CONFIG_DYNAMIC_DEBUG
+#endif
+
+#ifndef DEBUG_CONFIG
+#define DEBUG_CONFIG	1
+#endif
+
+#define USB_DEBUG(fmt, arg...)	printk(KERN_WARNING "#####%s@%d:" fmt "\n", __func__, __LINE__, ##arg)
+
+/*------------------------------------------------------------------------*/
+
+#define FSG_DRIVER_DESC		"Mass Storage Function"
+#define FSG_DRIVER_VERSION	"2009/09/11"
+
+static const char fsg_string_interface[] = "Mass Storage";
+
+#define FSG_NO_DEVICE_STRINGS    1
+#define FSG_NO_OTG               1
+#define FSG_NO_INTR_EP           1
+
+#define MAC_EJECTCDROM_3G_REQUEST	0xA1
+#define MAC_EJECTCDROM_4G_REQUEST	0xA2
+
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+#include "storage_common.c"
+
+
+/*-------------------------------------------------------------------------*/
+#ifndef CONFIG_SYSTEM_RECOVERY
+int zDrvNand_WriteBootflag( int flag );
+#endif
+void usb_notify_up(usb_notify_event notify_type, void* puf);
+int usb_is_reject_cdrom(void);
+#define OPENDL     0x99
+#define CLOSEDL_switch_to_USER 0x98
+
+struct fsg_dev;
+struct fsg_common;
+
+struct sched_param sch_param;
+
+/* FSF callback functions */
+struct fsg_operations {
+	/*
+	 * Callback function to call when the thread exits.  If no
+	 * callback is set or it returns a value lower than zero, MSF
+	 * will force eject all LUNs it operates on (including those
+	 * marked as non-removable or with prevent_medium_removal flag
+	 * set).
+	 */
+	int (*thread_exits)(struct fsg_common *common);
+
+	/*
+	 * Called prior to ejection.  Negative return means error,
+	 * zero means to continue with ejection, positive means not to
+	 * eject.
+	 */
+	int (*pre_eject)(struct fsg_common *common,
+			 struct fsg_lun *lun, int num);
+	/*
+	 * Called after ejection.  Negative return means error, zero
+	 * or positive means success.
+	 */
+	int (*post_eject)(struct fsg_common *common,
+			  struct fsg_lun *lun, int num);
+};
+
+/* Data shared by all the FSG instances. */
+struct fsg_common {
+	struct usb_gadget	*gadget;
+	struct usb_composite_dev *cdev;
+	struct fsg_dev		*fsg, *new_fsg;
+	wait_queue_head_t	fsg_wait;
+
+	/* filesem protects: backing files in use */
+	struct rw_semaphore	filesem;
+
+	/* lock protects: state, all the req_busy's */
+	spinlock_t		lock;
+
+	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
+	struct usb_request	*ep0req;	/* Copy of cdev->req */
+	unsigned int		ep0_req_tag;
+
+	struct fsg_buffhd	*next_buffhd_to_fill;
+	struct fsg_buffhd	*next_buffhd_to_drain;
+	struct fsg_buffhd	*buffhds;
+
+	int			cmnd_size;
+	u8			cmnd[MAX_COMMAND_SIZE];
+
+	unsigned int		nluns;
+	unsigned int		lun;
+	struct fsg_lun		*luns;
+	struct fsg_lun		*curlun;
+
+	unsigned int		bulk_out_maxpacket;
+	enum fsg_state		state;		/* For exception handling */
+	unsigned int		exception_req_tag;
+
+	enum data_direction	data_dir;
+	u32			data_size;
+	u32			data_size_from_cmnd;
+	u32			tag;
+	u32			residue;
+	u32			usb_amount_left;
+
+	unsigned int		can_stall:1;
+	unsigned int		free_storage_on_release:1;
+	unsigned int		phase_error:1;
+	unsigned int		short_packet_received:1;
+	unsigned int		bad_lun_okay:1;
+	unsigned int		running:1;
+
+	int			thread_wakeup_needed;
+	struct completion	thread_notifier;
+	struct task_struct	*thread_task;
+
+	/* Callback functions. */
+	const struct fsg_operations	*ops;
+	/* Gadget's private data. */
+	void			*private_data;
+
+	/*
+	 * Vendor (8 chars), product (16 chars), release (4
+	 * hexadecimal digits) and NUL byte
+	 */
+	char inquiry_string[8 + 16 + 4 + 1];
+
+	struct kref		ref;
+	struct work_struct work;
+};
+
+struct fsg_config {
+	unsigned nluns;
+	struct fsg_lun_config {
+		const char *filename;
+		char ro;
+		char removable;
+		char cdrom;
+		char nofua;
+	} luns[FSG_MAX_LUNS];
+
+	const char		*lun_name_format;
+	const char		*thread_name;
+
+	/* Callback functions. */
+	const struct fsg_operations	*ops;
+	/* Gadget's private data. */
+	void			*private_data;
+
+	const char *vendor_name;		/*  8 characters or less */
+	const char *product_name;		/* 16 characters or less */
+	u16 release;
+
+	char			can_stall;
+};
+
+struct fsg_dev {
+	struct usb_function	function;
+	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
+	struct fsg_common	*common;
+
+	u16			interface_number;
+
+	unsigned int		bulk_in_enabled:1;
+	unsigned int		bulk_out_enabled:1;
+
+	unsigned long		atomic_bitflags;
+#define IGNORE_BULK_OUT		0
+
+	struct usb_ep		*bulk_in;
+	struct usb_ep		*bulk_out;
+#ifdef CONFIG_PM
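+	/* Tracks USB bus suspend state; checked in start_transfer() */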
+	u32    suspend_state;
+#endif
+};
+
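+/* Invoked from do_start_stop() after the host ejects the virtual CD-ROM */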
+int usb_do_reject_cdrom(void);
+static inline int __fsg_is_set(struct fsg_common *common,
+			       const char *func, unsigned line)
+{
+	if (common->fsg)
+		return 1;
+	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
+	WARN_ON(1);
+	return 0;
+}
+
+#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
+
+static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
+{
+	return container_of(f, struct fsg_dev, function);
+}
+
+typedef void (*fsg_routine_t)(struct fsg_dev *);
+
+static int exception_in_progress(struct fsg_common *common)
+{
+	return common->state > FSG_STATE_IDLE;
+}
+
+/* Make bulk-out requests be divisible by the maxpacket size */
+static void set_bulk_out_req_length(struct fsg_common *common,
+				    struct fsg_buffhd *bh, unsigned int length)
+{
+	unsigned int	rem;
+
+	bh->bulk_out_intended_length = length;
+	rem = length % common->bulk_out_maxpacket;
+	if (rem > 0)
+		length += common->bulk_out_maxpacket - rem;
+	bh->outreq->length = length;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
+{
+	const char	*name;
+
+	if (ep == fsg->bulk_in)
+		name = "bulk-in";
+	else if (ep == fsg->bulk_out)
+		name = "bulk-out";
+	else
+		name = ep->name;
+	DBG(fsg, "%s set halt\n", name);
+	return usb_ep_set_halt(ep);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* These routines may be called in process context or in_irq */
+
+/* Caller must hold fsg->lock */
+static void wakeup_thread(struct fsg_common *common)
+{
+	smp_wmb();	/* ensure the write of bh->state is complete */
+	/* Tell the main thread that something has happened */
+	common->thread_wakeup_needed = 1;
+	if (common->thread_task)
+		wake_up_process(common->thread_task);
+}
+
+static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+{
+	unsigned long		flags;
+
+	/*
+	 * Do nothing if a higher-priority exception is already in progress.
+	 * If a lower-or-equal priority exception is in progress, preempt it
+	 * and notify the main thread by sending it a signal.
+	 */
+	spin_lock_irqsave(&common->lock, flags);
+	if (common->state <= new_state) {
+		common->exception_req_tag = common->ep0_req_tag;
+		common->state = new_state;
+		if (common->thread_task)
+			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
+				      common->thread_task);
+	}
+	spin_unlock_irqrestore(&common->lock, flags);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int ep0_queue(struct fsg_common *common)
+{
+	int	rc;
+
+	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
+	common->ep0->driver_data = common;
+	if (rc != 0 && rc != -ESHUTDOWN) {
+		/* We can't do much more than wait for a reset */
+		WARNING(common, "error in submission: %s --> %d\n",
+			common->ep0->name, rc);
+	}
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Completion handlers. These always run in_irq. */
+
+static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_common	*common = ep->driver_data;
+	struct fsg_buffhd	*bh = req->context;
+
+	if (req->status || req->actual != req->length)
+		DBG(common, "%s --> %d, %u/%u\n", __func__,
+		    req->status, req->actual, req->length);
+	if (req->status == -ECONNRESET)		/* Request was cancelled */
+		usb_ep_fifo_flush(ep);
+
+	/* Hold the lock while we update the request and buffer states */
+	smp_wmb();
+	spin_lock(&common->lock);
+	bh->inreq_busy = 0;
+	bh->state = BUF_STATE_EMPTY;
+	wakeup_thread(common);
+	spin_unlock(&common->lock);
+}
+
+static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_common	*common = ep->driver_data;
+	struct fsg_buffhd	*bh = req->context;
+
+	dump_msg(common, "bulk-out", req->buf, req->actual);
+	if (req->status || req->actual != bh->bulk_out_intended_length)
+		DBG(common, "%s --> %d, %u/%u\n", __func__,
+		    req->status, req->actual, bh->bulk_out_intended_length);
+	if (req->status == -ECONNRESET)		/* Request was cancelled */
+		usb_ep_fifo_flush(ep);
+
+	/* Hold the lock while we update the request and buffer states */
+	smp_wmb();
+	spin_lock(&common->lock);
+	bh->outreq_busy = 0;
+	bh->state = BUF_STATE_FULL;
+	wakeup_thread(common);
+	spin_unlock(&common->lock);
+}
+
+static int fsg_setup(struct usb_function *f,
+		     const struct usb_ctrlrequest *ctrl)
+{
+	struct fsg_dev		*fsg = fsg_from_func(f);
+	struct usb_request	*req = fsg->common->ep0req;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	if (!fsg_is_set(fsg->common))
+		return -EOPNOTSUPP;
+
+	++fsg->common->ep0_req_tag;	/* Record arrival of a new request */
+	req->context = NULL;
+	req->length = 0;
+	dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
+
+	switch (ctrl->bRequest) {
+
+	case US_BULK_RESET_REQUEST:
+		if (ctrl->bRequestType !=
+		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+			break;
+		if (w_index != fsg->interface_number || w_value != 0 ||
+				w_length != 0)
+			return -EDOM;
+
+		/*
+		 * Raise an exception to stop the current operation
+		 * and reinitialize our state.
+		 */
+		DBG(fsg, "bulk reset request\n");
+		raise_exception(fsg->common, FSG_STATE_RESET);
+		return DELAYED_STATUS;
+
+	case US_BULK_GET_MAX_LUN:
+		if (ctrl->bRequestType !=
+		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+			break;
+		if (w_index != fsg->interface_number || w_value != 0 ||
+				w_length != 1)
+			return -EDOM;
+		VDBG(fsg, "get max LUN\n");
+		*(u8 *)req->buf = fsg->common->nluns - 1;
+
+		/* Respond with data/status */
+		req->length = min((u16)1, w_length);
+		return ep0_queue(fsg->common);
+	case MAC_EJECTCDROM_3G_REQUEST:
+	case MAC_EJECTCDROM_4G_REQUEST:
+		/* TODO: handle Mac CD-ROM eject requests; for now just acknowledge them */
+		return 0;
+
+	}
+
+	VDBG(fsg,
+	     "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
+	     ctrl->bRequestType, ctrl->bRequest,
+	     le16_to_cpu(ctrl->wValue), w_index, w_length);
+	return -EOPNOTSUPP;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* All the following routines run in process context */
+
+/* Use this for bulk or interrupt transfers, not ep0 */
+#ifdef CONFIG_PM
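+/* Debug counter: number of transfers started while the bus was suspended */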
+unsigned int g_dbg_mass_times = 0;
+#endif
+static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
+			   struct usb_request *req, int *pbusy,
+			   enum fsg_buffer_state *state)
+{
+	int	rc;
+
+	if (ep == fsg->bulk_in)
+		dump_msg(fsg, "bulk-in", req->buf, req->length);
+
+#ifdef CONFIG_PM
+	if (fsg->suspend_state == 1) {
+		g_dbg_mass_times++;
+		usb_printk("%s, %u name:%s, wrtime:%d\n", __func__, __LINE__,
+			   fsg->function.name, g_dbg_mass_times);
+		USBSTACK_DBG("%s, %u name:%s, wrtime:%d", __func__, __LINE__,
+			     fsg->function.name, g_dbg_mass_times);
+#if 0
+		usb_gadget_wakeup(fsg->gadget);
+		do {
+			msleep(2);
+		} while (fsg->suspend_state == 1);
+#endif
+	}
+#endif
+
+	spin_lock_irq(&fsg->common->lock);
+	*pbusy = 1;
+	*state = BUF_STATE_BUSY;
+	spin_unlock_irq(&fsg->common->lock);
+	rc = usb_ep_queue(ep, req, GFP_KERNEL);
+	if (rc != 0) {
+		*pbusy = 0;
+		*state = BUF_STATE_EMPTY;
+
+		/* We can't do much more than wait for a reset */
+
+		/*
+		 * Note: currently the net2280 driver fails zero-length
+		 * submissions if DMA is enabled.
+		 */
+		if (rc != -ESHUTDOWN &&
+		    !(rc == -EOPNOTSUPP && req->length == 0))
+			WARNING(fsg, "error in submission: %s --> %d\n",
+				ep->name, rc);
+	}
+}
+
+static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	if (!fsg_is_set(common))
+		return false;
+	start_transfer(common->fsg, common->fsg->bulk_in,
+		       bh->inreq, &bh->inreq_busy, &bh->state);
+	return true;
+}
+
+static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	if (!fsg_is_set(common))
+		return false;
+	start_transfer(common->fsg, common->fsg->bulk_out,
+		       bh->outreq, &bh->outreq_busy, &bh->state);
+	return true;
+}
+
+static int sleep_thread(struct fsg_common *common)
+{
+	int	rc = 0;
+
+	/* Wait until a signal arrives or we are woken up */
+	for (;;) {
+		try_to_freeze();
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current)) {
+			rc = -EINTR;
+			break;
+		}
+		if (common->thread_wakeup_needed)
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+	common->thread_wakeup_needed = 0;
+	smp_rmb();	/* ensure the latest bh->state is visible */
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_read(struct fsg_common *common)
+{
+	struct fsg_lun		*curlun = common->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			rc;
+	u32			amount_left;
+	loff_t			file_offset, file_offset_tmp;
+	unsigned int		amount;
+	ssize_t			nread;
+
+	/*
+	 * Get the starting Logical Block Address and check that it's
+	 * not too big.
+	 */
+	if (common->cmnd[0] == READ_6)
+		lba = get_unaligned_be24(&common->cmnd[1]);
+	else {
+		lba = get_unaligned_be32(&common->cmnd[2]);
+
+		/*
+		 * We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = don't read from the
+		 * cache), but we don't implement them.
+		 */
+		if ((common->cmnd[1] & ~0x18) != 0) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+	file_offset = ((loff_t) lba) << curlun->blkbits;
+
+	/* Carry out the file reads */
+	amount_left = common->data_size_from_cmnd;
+	if (unlikely(amount_left == 0))
+		return -EIO;		/* No default reply */
+
+	for (;;) {
+		/*
+		 * Figure out how much we need to read:
+		 * Try to read the remaining amount.
+		 * But don't read more than the buffer size.
+		 * And don't try to read past the end of the file.
+		 */
+		amount = min(amount_left, FSG_BUFLEN);
+		amount = min((loff_t)amount,
+			     curlun->file_length - file_offset);
+
+		/* Wait for the next buffer to become available */
+		bh = common->next_buffhd_to_fill;
+		while (bh->state != BUF_STATE_EMPTY) {
+			rc = sleep_thread(common);
+			if (rc)
+				return rc;
+		}
+
+		/*
+		 * If we were asked to read past the end of file,
+		 * end with an empty buffer.
+		 */
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info =
+					file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			bh->inreq->length = 0;
+			bh->state = BUF_STATE_FULL;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				 (char __user *)bh->buf,
+				 amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+		      (unsigned long long)file_offset, (int)nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file read: %d\n", (int)nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file read: %d/%u\n",
+			     (int)nread, amount);
+			nread = round_down(nread, curlun->blksize);
+		}
+		file_offset  += nread;
+		amount_left  -= nread;
+		common->residue -= nread;
+
+		/*
+		 * Except at the end of the transfer, nread will be
+		 * equal to the buffer size, which is divisible by the
+		 * bulk-in maxpacket size.
+		 */
+		bh->inreq->length = nread;
+		bh->state = BUF_STATE_FULL;
+
+		/* If an error occurred, report it and its position */
+		if (nread < amount) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info =
+					file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			break;
+		}
+
+		if (amount_left == 0)
+			break;		/* No more left to read */
+
+		/* Send this buffer and go read some more */
+		bh->inreq->zero = 0;
+		if (!start_in_transfer(common, bh))
+			/* Don't know what to do if common->fsg is NULL */
+			return -EIO;
+		common->next_buffhd_to_fill = bh->next;
+	}
+
+	return -EIO;		/* No default reply */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_write(struct fsg_common *common)
+{
+	struct fsg_lun		*curlun = common->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			get_some_more;
+	u32			amount_left_to_req, amount_left_to_write;
+	loff_t			usb_offset, file_offset, file_offset_tmp;
+	unsigned int		amount;
+	ssize_t			nwritten;
+	int			rc;
+
+	if (curlun->ro) {
+		curlun->sense_data = SS_WRITE_PROTECTED;
+		return -EINVAL;
+	}
+	spin_lock(&curlun->filp->f_lock);
+	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
+	spin_unlock(&curlun->filp->f_lock);
+
+	/*
+	 * Get the starting Logical Block Address and check that it's
+	 * not too big
+	 */
+	if (common->cmnd[0] == WRITE_6)
+		lba = get_unaligned_be24(&common->cmnd[1]);
+	else {
+		lba = get_unaligned_be32(&common->cmnd[2]);
+
+		/*
+		 * We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = write directly to the
+		 * medium).  We don't implement DPO; we implement FUA by
+		 * performing synchronous output.
+		 */
+		if (common->cmnd[1] & ~0x18) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+		if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */
+			spin_lock(&curlun->filp->f_lock);
+			curlun->filp->f_flags |= O_SYNC;
+			spin_unlock(&curlun->filp->f_lock);
+		}
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/* Carry out the file writes */
+	get_some_more = 1;
+	file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
+	amount_left_to_req = common->data_size_from_cmnd;
+	amount_left_to_write = common->data_size_from_cmnd;
+
+	while (amount_left_to_write > 0) {
+
+		/* Queue a request for more data from the host */
+		bh = common->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY && get_some_more) {
+
+			/*
+			 * Figure out how much we want to get:
+			 * Try to get the remaining amount,
+			 * but not more than the buffer size.
+			 */
+			amount = min(amount_left_to_req, FSG_BUFLEN);
+
+			/* Beyond the end of the backing file? */
+			if (usb_offset >= curlun->file_length) {
+				get_some_more = 0;
+				curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+				curlun->sense_data_info =
+					usb_offset >> curlun->blkbits;
+				curlun->info_valid = 1;
+				continue;
+			}
+
+			/* Get the next buffer */
+			usb_offset += amount;
+			common->usb_amount_left -= amount;
+			amount_left_to_req -= amount;
+			if (amount_left_to_req == 0)
+				get_some_more = 0;
+
+			/*
+			 * Except at the end of the transfer, amount will be
+			 * equal to the buffer size, which is divisible by
+			 * the bulk-out maxpacket size.
+			 */
+			set_bulk_out_req_length(common, bh, amount);
+			if (!start_out_transfer(common, bh))
+				/* Dunno what to do if common->fsg is NULL */
+				return -EIO;
+			common->next_buffhd_to_fill = bh->next;
+			continue;
+		}
+
+		/* Write the received data to the backing file */
+		bh = common->next_buffhd_to_drain;
+		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
+			break;			/* We stopped early */
+		if (bh->state == BUF_STATE_FULL) {
+			smp_rmb();
+			common->next_buffhd_to_drain = bh->next;
+			bh->state = BUF_STATE_EMPTY;
+
+			/* Did something go wrong with the transfer? */
+			if (bh->outreq->status != 0) {
+				curlun->sense_data = SS_COMMUNICATION_FAILURE;
+				curlun->sense_data_info =
+					file_offset >> curlun->blkbits;
+				curlun->info_valid = 1;
+				break;
+			}
+
+			amount = bh->outreq->actual;
+			if (curlun->file_length - file_offset < amount) {
+				LERROR(curlun,
+				       "write %u @ %llu beyond end %llu\n",
+				       amount, (unsigned long long)file_offset,
+				       (unsigned long long)curlun->file_length);
+				amount = curlun->file_length - file_offset;
+			}
+
+			/* Don't accept excess data.  The spec doesn't say
+			 * what to do in this case.  We'll ignore the error.
+			 */
+			amount = min(amount, bh->bulk_out_intended_length);
+
+			/* Don't write a partial block */
+			amount = round_down(amount, curlun->blksize);
+			if (amount == 0)
+				goto empty_write;
+
+			/* Perform the write */
+			file_offset_tmp = file_offset;
+			nwritten = vfs_write(curlun->filp,
+					     (char __user *)bh->buf,
+					     amount, &file_offset_tmp);
+			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
+			      (unsigned long long)file_offset, (int)nwritten);
+			if (signal_pending(current))
+				return -EINTR;		/* Interrupted! */
+
+			if (nwritten < 0) {
+				LDBG(curlun, "error in file write: %d\n",
+				     (int)nwritten);
+				nwritten = 0;
+			} else if (nwritten < amount) {
+				LDBG(curlun, "partial file write: %d/%u\n",
+				     (int)nwritten, amount);
+				nwritten = round_down(nwritten, curlun->blksize);
+			}
+			file_offset += nwritten;
+			amount_left_to_write -= nwritten;
+			common->residue -= nwritten;
+
+			/* If an error occurred, report it and its position */
+			if (nwritten < amount) {
+				curlun->sense_data = SS_WRITE_ERROR;
+				curlun->sense_data_info =
+					file_offset >> curlun->blkbits;
+				curlun->info_valid = 1;
+				break;
+			}
+
+ empty_write:
+			/* Did the host decide to stop early? */
+			if (bh->outreq->actual < bh->bulk_out_intended_length) {
+				common->short_packet_received = 1;
+				break;
+			}
+			continue;
+		}
+
+		/* Wait for something to happen */
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+
+	return -EIO;		/* No default reply */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_synchronize_cache(struct fsg_common *common)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		rc;
+
+	/* We ignore the requested LBA and write out all file's
+	 * dirty data buffers. */
+	rc = fsg_lun_fsync_sub(curlun);
+	if (rc)
+		curlun->sense_data = SS_WRITE_ERROR;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void invalidate_sub(struct fsg_lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+	struct inode	*inode = filp->f_path.dentry->d_inode;
+	unsigned long	rc;
+
+	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
+	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
+}
+
+static int do_verify(struct fsg_common *common)
+{
+	struct fsg_lun		*curlun = common->curlun;
+	u32			lba;
+	u32			verification_length;
+	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
+	loff_t			file_offset, file_offset_tmp;
+	u32			amount_left;
+	unsigned int		amount;
+	ssize_t			nread;
+
+	/*
+	 * Get the starting Logical Block Address and check that it's
+	 * not too big.
+	 */
+	lba = get_unaligned_be32(&common->cmnd[2]);
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/*
+	 * We allow DPO (Disable Page Out = don't save data in the
+	 * cache) but we don't implement it.
+	 */
+	if (common->cmnd[1] & ~0x10) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	verification_length = get_unaligned_be16(&common->cmnd[7]);
+	if (unlikely(verification_length == 0))
+		return -EIO;		/* No default reply */
+
+	/* Prepare to carry out the file verify */
+	amount_left = verification_length << curlun->blkbits;
+	file_offset = ((loff_t) lba) << curlun->blkbits;
+
+	/* Write out all the dirty buffers before invalidating them */
+	fsg_lun_fsync_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	invalidate_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	/* Just try to read the requested blocks */
+	while (amount_left > 0) {
+		/*
+		 * Figure out how much we need to read:
+		 * Try to read the remaining amount, but not more than
+		 * the buffer size.
+		 * And don't try to read past the end of the file.
+		 */
+		amount = min(amount_left, FSG_BUFLEN);
+		amount = min((loff_t)amount,
+			     curlun->file_length - file_offset);
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info =
+				file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				(char __user *) bh->buf,
+				amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+				(unsigned long long) file_offset,
+				(int) nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file verify: %d\n", (int)nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file verify: %d/%u\n",
+			     (int)nread, amount);
+			nread = round_down(nread, curlun->blksize);
+		}
+		if (nread == 0) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info =
+				file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			break;
+		}
+		file_offset += nread;
+		amount_left -= nread;
+	}
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun *curlun = common->curlun;
+	u8	*buf = (u8 *) bh->buf;
+
+	if (!curlun) {		/* Unsupported LUNs are okay */
+		common->bad_lun_okay = 1;
+		memset(buf, 0, 36);
+		buf[0] = 0x7f;		/* Unsupported, no device-type */
+		buf[4] = 31;		/* Additional length */
+		return 36;
+	}
+
+	buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK;
+	buf[1] = curlun->removable ? 0x80 : 0;
+	buf[2] = 2;		/* ANSI SCSI level 2 */
+	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
+	buf[4] = 31;		/* Additional length */
+	buf[5] = 0;		/* No special options */
+	buf[6] = 0;
+	buf[7] = 0;
+	memcpy(buf + 8, curlun->inquiry_string, sizeof curlun->inquiry_string);
+	return 36;
+}
+
+static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	u8		*buf = (u8 *) bh->buf;
+	u32		sd, sdinfo;
+	int		valid;
+
+	/*
+	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
+	 *
+	 * If a REQUEST SENSE command is received from an initiator
+	 * with a pending unit attention condition (before the target
+	 * generates the contingent allegiance condition), then the
+	 * target shall either:
+	 *   a) report any pending sense data and preserve the unit
+	 *	attention condition on the logical unit, or,
+	 *   b) report the unit attention condition, may discard any
+	 *	pending sense data, and clear the unit attention
+	 *	condition on the logical unit for that initiator.
+	 *
+	 * FSG normally uses option a); enable this code to use option b).
+	 */
+#if 0
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+	}
+#endif
+
+	if (!curlun) {		/* Unsupported LUNs are okay */
+		common->bad_lun_okay = 1;
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+		sdinfo = 0;
+		valid = 0;
+	} else {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+		valid = curlun->info_valid << 7;
+		curlun->sense_data = SS_NO_SENSE;
+		curlun->sense_data_info = 0;
+		curlun->info_valid = 0;
+	}
+
+	memset(buf, 0, 18);
+	buf[0] = valid | 0x70;			/* Valid, current error */
+	buf[2] = SK(sd);
+	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
+	buf[7] = 18 - 8;			/* Additional sense length */
+	buf[12] = ASC(sd);
+	buf[13] = ASCQ(sd);
+	return 18;
+}
+
+static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	u32		lba = get_unaligned_be32(&common->cmnd[2]);
+	int		pmi = common->cmnd[8];
+	u8		*buf = (u8 *)bh->buf;
+
+	/* Check the PMI and LBA fields */
+	if (pmi > 1 || (pmi == 0 && lba != 0)) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
+						/* Max logical block */
+	put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
+	return 8;
+}
+
+static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		msf = common->cmnd[1] & 0x02;
+	u32		lba = get_unaligned_be32(&common->cmnd[2]);
+	u8		*buf = (u8 *)bh->buf;
+
+	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 8);
+	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
+	store_cdrom_address(&buf[4], msf, lba);
+	return 8;
+}
+
+#if 0
+static u8 Option_response_data48[] =
+    {
+        0x00, 0x2e, 0x01, 0x01, 0x01,
+        0x14, 0x00, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01,
+        0x14, 0x00, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01,
+        0x14, 0x00, 0xa2, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x3c, 0x01,
+        0x14, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00
+    };
+
+#else
+static u8 Option_response_data48[] =
+    {
+        0x00, 0x2e, 0x01, 0x01, 0x01,
+        0x14, 0x00, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01,
+        0x14, 0x00, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01,
+        0x14, 0x00, 0xa2, 0x00, 0x00, 0x00, 0x00, 0x08, 0x18, 0x24, 0x01,
+        0x14, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00
+    };
+#endif
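+/* Byte offsets within a READ TOC track descriptor built by do_read_toc() */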
+#define TOC_TRACK_HEAD_LEN			4
+#define TOC_TRACK_DES_SESSION_NUM 	0
+#define TOC_TRACK_DES_ADR_CTRL 		1
+#define TOC_TRACK_DES_TNO 			2
+#define TOC_TRACK_DES_POINT			3
+#define TOC_TRACK_DES_MIN			4
+#define TOC_TRACK_DES_SEC			5
+#define TOC_TRACK_DES_FRAME			6
+#define TOC_TRACK_DES_ZERO			7
+#define TOC_TRACK_DES_PMIN			8
+#define TOC_TRACK_DES_PSEC			9
+#define TOC_TRACK_DES_PFRAME    	10
+#define TOC_TRACK_DES_LEN			11
+
+static void toc_lba_to_address(u8 *dest, int msf, u32 addr)
+{
+	if (msf) {
+		/* Convert to Minutes-Seconds-Frames */
+		addr >>= 2;		/* Convert to 2048-byte frames */
+		addr += 2*75;		/* Lead-in occupies 2 seconds */
+		dest[2] = addr % 75;	/* Frames */
+		addr /= 75;
+		dest[1] = addr % 60;	/* Seconds */
+		addr /= 60;
+		dest[0] = addr;		/* Minutes */
+	} else {
+		/* Absolute sector */
+		put_unaligned_be32(addr, dest);
+	}
+}
+
+static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		msf = common->cmnd[1] & 0x02;
+	int		start_track = common->cmnd[6];
+	int		formate = common->cmnd[9] & 0xc0;
+	u8		*buf = (u8 *)bh->buf;
+	int		alloclength = (common->cmnd[7] << 8) + common->cmnd[8];
+	int		retlength = 0;
+	u8		index = 0;
+	loff_t		addr = curlun->num_sectors;
+
+
+	switch (formate) {
+	case 0x80:	/* multi-session */
+	{
+		memset(buf, 0, TOC_TRACK_HEAD_LEN);
+		buf[index + 1] = 0x2e;		/* TOC data length */
+		buf[index + 2] = 0x01;			/* First track number */
+		buf[index + 3] = 0x01;			/* Last track number */
+		index += TOC_TRACK_HEAD_LEN;
+
+		memset(&buf[index], 0, TOC_TRACK_DES_LEN);
+		buf[index + TOC_TRACK_DES_SESSION_NUM] = 0x01;	/* session number,only 1 session */
+		buf[index + TOC_TRACK_DES_ADR_CTRL] = 0x14;		/* ADR Mode-1 Q Control Data track,recorded uninterrupted */
+		buf[index + TOC_TRACK_DES_TNO] = 0x00;			/*TNO 00 for lead in area*/
+		buf[index + TOC_TRACK_DES_POINT] = 0xA0;		/*First Track number in the program area*/
+		buf[index + TOC_TRACK_DES_PMIN] = 0x01;			/*First track is 1,disc type 00 cd-da or cd-rom*/
+		index += TOC_TRACK_DES_LEN;
+
+		memset(&buf[index], 0, TOC_TRACK_DES_LEN);
+		buf[index + TOC_TRACK_DES_SESSION_NUM] = 0x01;	/* session number,only 1 session */
+		buf[index + TOC_TRACK_DES_ADR_CTRL] = 0x14;		/* ADR Mode-1 Q Control Data track,recorded uninterrupted */
+		buf[index + TOC_TRACK_DES_TNO] = 0x00;			/*TNO 00 for lead in area*/
+		buf[index + TOC_TRACK_DES_POINT] = 0xA1;		/*Last Track number in the program area*/
+		buf[index + TOC_TRACK_DES_PMIN] = 0x01;			/*Last track is 1,disc type 00 cd-da or cd-rom*/
+		index += TOC_TRACK_DES_LEN;
+
+		memset(&buf[index], 0, TOC_TRACK_DES_LEN);
+		buf[index + TOC_TRACK_DES_SESSION_NUM] = 0x01;	/* session number,only 1 session */
+		buf[index + TOC_TRACK_DES_ADR_CTRL] = 0x14;		/* ADR Mode-1 Q Control Data track,recorded uninterrupted */
+		buf[index + TOC_TRACK_DES_TNO] = 0x00;			/*TNO 00 for lead in area*/
+		buf[index + TOC_TRACK_DES_POINT] = 0xA2;		/*First Track number in the lead out area*/
+		buf[index + TOC_TRACK_DES_PMIN] = 0x01;			/*start position of the lead out area*/
+		if (curlun->blksize == 2048)
+			addr = curlun->num_sectors << 2;	/* to 512-byte units */
+		toc_lba_to_address(&buf[index + TOC_TRACK_DES_PMIN], msf, addr);
+		index += TOC_TRACK_DES_LEN;
+
+		memset(&buf[index], 0, TOC_TRACK_DES_LEN);
+		buf[index + TOC_TRACK_DES_SESSION_NUM] = 0x01;	/* session number,only 1 session */
+		buf[index + TOC_TRACK_DES_ADR_CTRL] = 0x14;		/* ADR Mode-1 Q Control Data track,recorded uninterrupted */
+		buf[index + TOC_TRACK_DES_TNO] = 0x00;			/*TNO 00 for lead in area*/
+		buf[index + TOC_TRACK_DES_POINT] = 0x01;		/*start track*/
+		buf[index + TOC_TRACK_DES_PSEC] = 0x02;			/*start position of track MFS 00 02 00*/
+		index += TOC_TRACK_DES_LEN;
+		/* memcpy(buf, Option_response_data48, MIN(48, alloclength)); */
+		retlength = MIN(index, alloclength);
+
+		break;
+	}
+	case 0x40:
+	{
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+	case 0x00:
+	{
+		if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
+				start_track > 1) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+
+		memset(buf, 0, 20);
+		buf[1] = (20-2);		/* TOC data length */
+		buf[2] = 1;			/* First track number */
+		buf[3] = 1;			/* Last track number */
+		buf[5] = 0x16;			/* Data track, copying allowed */
+		buf[6] = 0xAA;			/* Only track is number 1 */
+		store_cdrom_address(&buf[8], msf, 0);
+
+		buf[13] = 0x16;			/* Lead-out track is data */
+		buf[14] = 0xAA;			/* Lead-out track number */
+		store_cdrom_address(&buf[16], msf, curlun->num_sectors);
+
+		retlength = MIN(20, alloclength);
+		break;
+	}
+	}
+
+	return retlength;
+}
+
+static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		mscmnd = common->cmnd[0];
+	u8		*buf = (u8 *) bh->buf;
+	u8		*buf0 = buf;
+	int		pc, page_code;
+	int		changeable_values, all_pages;
+	int		valid_page = 0;
+	int		len, limit;
+
+	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+	pc = common->cmnd[2] >> 6;
+	page_code = common->cmnd[2] & 0x3f;
+	if (pc == 3) {
+		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
+		return -EINVAL;
+	}
+	changeable_values = (pc == 1);
+	all_pages = (page_code == 0x3f);
+
+	/*
+	 * Write the mode parameter header.  Fixed values are: default
+	 * medium type, no cache control (DPOFUA), and no block descriptors.
+	 * The only variable value is the WriteProtect bit.  We will fill in
+	 * the mode data length later.
+	 */
+	memset(buf, 0, 8);
+	if (mscmnd == MODE_SENSE) {
+		buf[2] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
+		buf += 4;
+		limit = 255;
+	} else {			/* MODE_SENSE_10 */
+		buf[3] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
+		buf += 8;
+		limit = 65535;		/* Should really be FSG_BUFLEN */
+	}
+
+	/* No block descriptors */
+
+	/*
+	 * The mode pages, in numerical order.  The only page we support
+	 * is the Caching page.
+	 */
+	if (page_code == 0x08 || all_pages) {
+		valid_page = 1;
+		buf[0] = 0x08;		/* Page code */
+		buf[1] = 10;		/* Page length */
+		memset(buf+2, 0, 10);	/* None of the fields are changeable */
+
+		if (!changeable_values) {
+			buf[2] = 0x04;	/* Write cache enable, */
+					/* Read cache not disabled */
+					/* No cache retention priorities */
+			put_unaligned_be16(0xffff, &buf[4]);
+					/* Don't disable prefetch */
+					/* Minimum prefetch = 0 */
+			put_unaligned_be16(0xffff, &buf[8]);
+					/* Maximum prefetch */
+			put_unaligned_be16(0xffff, &buf[10]);
+					/* Maximum prefetch ceiling */
+		}
+		buf += 12;
+	}
+
+	/*
+	 * Check that a valid page was requested and the mode data length
+	 * isn't too long.
+	 */
+	len = buf - buf0;
+	if (!valid_page || len > limit) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	/*  Store the mode data length */
+	if (mscmnd == MODE_SENSE)
+		buf0[0] = len - 1;
+	else
+		put_unaligned_be16(len - 2, buf0);
+	return len;
+}
+
+
+
+static int do_start_stop(struct fsg_common *common)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		loej, start;
+
+	if (!curlun) {
+		return -EINVAL;
+	} else if (!curlun->removable) {
+		curlun->sense_data = SS_INVALID_COMMAND;
+		return -EINVAL;
+	} else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
+		   (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	USBSTACK_DBG("MSG[%d] do_start_stop cmnd[4]: 0x%x",
+		     (curlun->cdrom) ? 0 : 1, common->cmnd[4]);
+
+	loej  = common->cmnd[4] & 0x02;
+	start = common->cmnd[4] & 0x01;
+
+	/* Reject a combined "load and eject" request */
+	if (start && loej) {
+		curlun->sense_data = SS_INVALID_COMMAND;
+		USBSTACK_DBG("MSG[%d] start-stop fail!", (curlun->cdrom) ? 0 : 1);
+		return -EINVAL;
+	}
+
+	/*
+	 * Our emulation doesn't support mounting; the medium is
+	 * available for use as soon as it is loaded.
+	 */
+	if (start) {
+		if (!fsg_lun_is_open(curlun)) {
+			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	/* Are we allowed to unload the media? */
+	if (curlun->prevent_medium_removal) {
+		LDBG(curlun, "unload attempt prevented\n");
+		curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
+		return -EINVAL;
+	}
+
+	if (!loej)
+		return 0;
+
+	/* Simulate an unload/eject */
+	if (common->ops && common->ops->pre_eject) {
+		int r = common->ops->pre_eject(common, curlun,
+					       curlun - common->luns);
+		if (unlikely(r < 0))
+			return r;
+		else if (r)
+			return 0;
+	}
+
+	up_read(&common->filesem);
+	down_write(&common->filesem);
+	fsg_lun_close(curlun);
+	up_write(&common->filesem);
+	down_read(&common->filesem);
+
+	/* Notifying here would be too early; the PC would pop up an error. */
+	/* usb_notify_up(USB_CDROM_OBJECT, NULL); */
+	usb_do_reject_cdrom();
+
+	return common->ops && common->ops->post_eject
+		? min(0, common->ops->post_eject(common, curlun,
+						 curlun - common->luns))
+		: 0;
+}
+
+static int do_prevent_allow(struct fsg_common *common)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		prevent;
+
+	if (!common->curlun) {
+		return -EINVAL;
+	} else if (!common->curlun->removable) {
+		common->curlun->sense_data = SS_INVALID_COMMAND;
+		return -EINVAL;
+	}
+
+	USBSTACK_DBG("MSG[%d] do_prevent_allow cmnd[4]: 0x%x", 
+		((curlun->cdrom)?0:1), common->cmnd[4]);
+
+	prevent = common->cmnd[4] & 0x01;
+
+	if(prevent == 0x1){
+		common->curlun->sense_data = SS_INVALID_COMMAND;
+		USBSTACK_DBG("MSG[%d] do_prevent_allow return 1", ((curlun->cdrom)?0:1));
+		return -EINVAL;
+	}	
+
+	
+	if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		USBSTACK_DBG("MSG[%d] do_prevent_allow return 2", ((curlun->cdrom)?0:1));
+		return -EINVAL;
+	}
+
+	if (curlun->prevent_medium_removal && !prevent)
+		fsg_lun_fsync_sub(curlun);
+	curlun->prevent_medium_removal = prevent;
+	return 0;
+}
+
+static int do_read_format_capacities(struct fsg_common *common,
+			struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	u8		*buf = (u8 *) bh->buf;
+
+	buf[0] = buf[1] = buf[2] = 0;
+	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
+	buf += 4;
+
+	put_unaligned_be32(curlun->num_sectors, &buf[0]);
+						/* Number of blocks */
+	put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
+	buf[4] = 0x02;				/* Current capacity */
+	return 12;
+}
+
+static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+
+	/* We don't support MODE SELECT */
+	if (curlun)
+		curlun->sense_data = SS_INVALID_COMMAND;
+	return -EINVAL;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
+{
+	int	rc;
+
+	rc = fsg_set_halt(fsg, fsg->bulk_in);
+	if (rc == -EAGAIN)
+		VDBG(fsg, "delayed bulk-in endpoint halt\n");
+	while (rc != 0) {
+		if (rc != -EAGAIN) {
+			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
+			rc = 0;
+			break;
+		}
+
+		/* Wait for a short time and then try again */
+		if (msleep_interruptible(100) != 0)
+			return -EINTR;
+		rc = usb_ep_set_halt(fsg->bulk_in);
+	}
+	return rc;
+}
+
+static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
+{
+	int	rc;
+
+	DBG(fsg, "bulk-in set wedge\n");
+	rc = usb_ep_set_wedge(fsg->bulk_in);
+	if (rc == -EAGAIN)
+		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
+	while (rc != 0) {
+		if (rc != -EAGAIN) {
+			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
+			rc = 0;
+			break;
+		}
+
+		/* Wait for a short time and then try again */
+		if (msleep_interruptible(100) != 0)
+			return -EINTR;
+		rc = usb_ep_set_wedge(fsg->bulk_in);
+	}
+	return rc;
+}
+
+static int throw_away_data(struct fsg_common *common)
+{
+	struct fsg_buffhd	*bh;
+	u32			amount;
+	int			rc;
+
+	for (bh = common->next_buffhd_to_drain;
+	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
+	     bh = common->next_buffhd_to_drain) {
+
+		/* Throw away the data in a filled buffer */
+		if (bh->state == BUF_STATE_FULL) {
+			smp_rmb();
+			bh->state = BUF_STATE_EMPTY;
+			common->next_buffhd_to_drain = bh->next;
+
+			/* A short packet or an error ends everything */
+			if (bh->outreq->actual < bh->bulk_out_intended_length ||
+			    bh->outreq->status != 0) {
+				raise_exception(common,
+						FSG_STATE_ABORT_BULK_OUT);
+				return -EINTR;
+			}
+			continue;
+		}
+
+		/* Try to submit another request if we need one */
+		bh = common->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY
+		 && common->usb_amount_left > 0) {
+			amount = min(common->usb_amount_left, FSG_BUFLEN);
+
+			/*
+			 * Except at the end of the transfer, amount will be
+			 * equal to the buffer size, which is divisible by
+			 * the bulk-out maxpacket size.
+			 */
+			set_bulk_out_req_length(common, bh, amount);
+			if (!start_out_transfer(common, bh))
+				/* Dunno what to do if common->fsg is NULL */
+				return -EIO;
+			common->next_buffhd_to_fill = bh->next;
+			common->usb_amount_left -= amount;
+			continue;
+		}
+
+		/* Otherwise wait for something to happen */
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+static int finish_reply(struct fsg_common *common)
+{
+	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
+	int			rc = 0;
+
+
+	switch (common->data_dir) {
+	case DATA_DIR_NONE:
+		break;			/* Nothing to send */
+
+	/*
+	 * If we don't know whether the host wants to read or write,
+	 * this must be CB or CBI with an unknown command.  We mustn't
+	 * try to send or receive any data.  So stall both bulk pipes
+	 * if we can and wait for a reset.
+	 */
+	case DATA_DIR_UNKNOWN:
+		if (!common->can_stall) {
+			/* Nothing */
+		} else if (fsg_is_set(common)) {
+			fsg_set_halt(common->fsg, common->fsg->bulk_out);
+			rc = halt_bulk_in_endpoint(common->fsg);
+		} else {
+			/* Don't know what to do if common->fsg is NULL */
+			rc = -EIO;
+		}
+		break;
+
+	/* All but the last buffer of data must have already been sent */
+	case DATA_DIR_TO_HOST:
+		if (common->data_size == 0) {
+			/* Nothing to send */
+
+		/* Don't know what to do if common->fsg is NULL */
+		} else if (!fsg_is_set(common)) {
+			rc = -EIO;
+
+		/* If there's no residue, simply send the last buffer */
+		} else if (common->residue == 0) {
+			bh->inreq->zero = 0;
+			if (!start_in_transfer(common, bh))
+				return -EIO;
+			common->next_buffhd_to_fill = bh->next;
+
+		/*
+		 * For Bulk-only, mark the end of the data with a short
+		 * packet.  If we are allowed to stall, halt the bulk-in
+		 * endpoint.  (Note: This violates the Bulk-Only Transport
+		 * specification, which requires us to pad the data if we
+		 * don't halt the endpoint.  Presumably nobody will mind.)
+		 */
+		} else {
+			bh->inreq->zero = 1;
+			if (!start_in_transfer(common, bh))
+				rc = -EIO;
+			common->next_buffhd_to_fill = bh->next;
+			if (common->can_stall)
+				rc = halt_bulk_in_endpoint(common->fsg);
+		}
+		break;
+
+	/*
+	 * We have processed all we want from the data the host has sent.
+	 * There may still be outstanding bulk-out requests.
+	 */
+	case DATA_DIR_FROM_HOST:
+		if (common->residue == 0) {
+			/* Nothing to receive */
+
+		/* Did the host stop sending unexpectedly early? */
+		} else if (common->short_packet_received) {
+			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
+			rc = -EINTR;
+
+		/*
+		 * We haven't processed all the incoming data.  Even though
+		 * we may be allowed to stall, doing so would cause a race.
+		 * The controller may already have ACK'ed all the remaining
+		 * bulk-out packets, in which case the host wouldn't see a
+		 * STALL.  Not realizing the endpoint was halted, it wouldn't
+		 * clear the halt -- leading to problems later on.
+		 */
+#if 0
+		} else if (common->can_stall) {
+			if (fsg_is_set(common))
+				fsg_set_halt(common->fsg,
+					     common->fsg->bulk_out);
+			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
+			rc = -EINTR;
+#endif
+
+		/*
+		 * We can't stall.  Read in the excess data and throw it
+		 * all away.
+		 */
+		} else {
+			rc = throw_away_data(common);
+		}
+		break;
+	}
+	return rc;
+}
+
+static int send_status(struct fsg_common *common)
+{
+	struct fsg_lun		*curlun = common->curlun;
+	struct fsg_buffhd	*bh;
+	struct bulk_cs_wrap	*csw;
+	int			rc;
+	u8			status = US_BULK_STAT_OK;
+	u32			sd, sdinfo = 0;
+
+	/* Wait for the next buffer to become available */
+	bh = common->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+
+	if (curlun) {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+	} else if (common->bad_lun_okay)
+		sd = SS_NO_SENSE;
+	else
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+
+	if (common->phase_error) {
+		DBG(common, "sending phase-error status\n");
+		status = US_BULK_STAT_PHASE;
+		sd = SS_INVALID_COMMAND;
+	} else if (sd != SS_NO_SENSE) {
+		DBG(common, "sending command-failure status\n");
+		status = US_BULK_STAT_FAIL;
+		VDBG(common, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
+				"  info x%x\n",
+				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
+	}
+
+	/* Store and send the Bulk-only CSW */
+	csw = (void *)bh->buf;
+
+	csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
+	csw->Tag = common->tag;
+	csw->Residue = cpu_to_le32(common->residue);
+	csw->Status = status;
+	/*
+	 * Win7 may issue READ(10) even when no T-flash card is present in
+	 * the MiFi; report a failed CSW in that case (EC61600633846).
+	 */
+	if (curlun && (curlun->filp == NULL) && (common->cmnd[0] == READ_10)) {
+		csw->Status = US_BULK_STAT_FAIL;
+		printk("--send_status, US_BULK_STAT_FAIL\n");
+	}
+
+	bh->inreq->length = US_BULK_CS_WRAP_LEN;
+	bh->inreq->zero = 0;
+	if (!start_in_transfer(common, bh))
+		/* Don't know what to do if common->fsg is NULL */
+		return -EIO;
+
+	common->next_buffhd_to_fill = bh->next;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Check whether the command is properly formed and whether its data size
+ * and direction agree with the values we already have.
+ */
+static int check_command(struct fsg_common *common, int cmnd_size,
+			 enum data_direction data_dir, unsigned int mask,
+			 int needs_medium, const char *name)
+{
+	int			i;
+	int			lun = common->cmnd[1] >> 5;
+	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
+	char			hdlen[20];
+	struct fsg_lun		*curlun;
+
+	hdlen[0] = 0;
+	if (common->data_dir != DATA_DIR_UNKNOWN)
+		sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
+			common->data_size);
+	VDBG(common, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
+	     name, cmnd_size, dirletter[(int) data_dir],
+	     common->data_size_from_cmnd, common->cmnd_size, hdlen);
+
+	//USB_DEBUG( "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
+	//     name, cmnd_size, dirletter[(int) data_dir],
+	//     common->data_size_from_cmnd, common->cmnd_size, hdlen);
+
+	/*
+	 * We can't reply at all until we know the correct data direction
+	 * and size.
+	 */
+	if (common->data_size_from_cmnd == 0)
+		data_dir = DATA_DIR_NONE;
+	if (common->data_size < common->data_size_from_cmnd) {
+		/*
+		 * Host data size < Device data size is a phase error.
+		 * Carry out the command, but only transfer as much as
+		 * we are allowed.
+		 */
+		common->data_size_from_cmnd = common->data_size;
+		common->phase_error = 1;
+	}
+	common->residue = common->data_size;
+	common->usb_amount_left = common->data_size;
+
+	/* Conflicting data directions is a phase error */
+	if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) {
+		common->phase_error = 1;
+		//USBSTACK_DBG("check fail 1");
+		return -EINVAL;
+	}
+
+	/* Verify the length of the command itself */
+	if (cmnd_size != common->cmnd_size) {
+
+		/*
+		 * Special case workaround: There are plenty of buggy SCSI
+		 * implementations. Many have issues with cbw->Length
+		 * field passing a wrong command size. For those cases we
+		 * always try to work around the problem by using the length
+		 * sent by the host side provided it is at least as large
+		 * as the correct command length.
+		 * Examples of such cases would be MS-Windows, which issues
+		 * REQUEST SENSE with cbw->Length == 12 where it should
+		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
+		 * REQUEST SENSE with cbw->Length == 10 where it should
+		 * be 6 as well.
+		 */
+		if (cmnd_size <= common->cmnd_size) {
+			DBG(common, "%s is buggy! Expected length %d "
+			    "but we got %d\n", name,
+			    cmnd_size, common->cmnd_size);
+			cmnd_size = common->cmnd_size;
+		} else {
+			common->phase_error = 1;
+			//USBSTACK_DBG("check fail 2");
+			return -EINVAL;
+		}
+	}
+
+	/* Check that the LUN values are consistent */
+	if (common->lun != lun)
+		DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
+		    common->lun, lun);
+
+	/* Check the LUN */
+	curlun = common->curlun;
+	if (curlun) {
+		if (common->cmnd[0] != REQUEST_SENSE) {
+			curlun->sense_data = SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+	} else {
+		common->bad_lun_okay = 0;
+
+		/*
+		 * INQUIRY and REQUEST SENSE commands are explicitly allowed
+		 * to use unsupported LUNs; all others may not.
+		 */
+		if (common->cmnd[0] != INQUIRY &&
+		    common->cmnd[0] != REQUEST_SENSE) {
+			DBG(common, "unsupported LUN %d\n", common->lun);
+			//USBSTACK_DBG("check fail 3");
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * If a unit attention condition exists, only INQUIRY and
+	 * REQUEST SENSE commands are allowed; anything else must fail.
+	 */
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
+	    common->cmnd[0] != INQUIRY &&
+	    common->cmnd[0] != REQUEST_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+		//USBSTACK_DBG("check fail 4");
+		return -EINVAL;
+	}
+
+	/* Check that only command bytes listed in the mask are non-zero */
+	common->cmnd[1] &= 0x1f;			/* Mask away the LUN */
+	for (i = 1; i < cmnd_size; ++i) {
+		if (common->cmnd[i] && !(mask & (1 << i))) {
+			if (curlun)
+				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			//USBSTACK_DBG("check fail 5");
+			return -EINVAL;
+		}
+	}
+
+	/* If the medium isn't mounted and the command needs to access
+	 * it, return an error. */
+	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
+		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+		//USBSTACK_DBG("check fail 6");
+		return -EINVAL;
+	}
+
+	/*
+	 * When auto-reject is enabled, report "medium not present" for
+	 * everything except the commands a host needs to probe the LUN.
+	 */
+	if (usb_get_ms_auto_reject()) {
+		BUG_ON(curlun == NULL);
+		if ((common->cmnd[0] != INQUIRY) &&
+		    (common->cmnd[0] != REQUEST_SENSE) &&
+		    (common->cmnd[0] != MODE_SENSE) &&
+		    (common->cmnd[0] != ALLOW_MEDIUM_REMOVAL)) {
+			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* wrapper of check_command for data size in blocks handling */
+static int check_command_size_in_blocks(struct fsg_common *common,
+		int cmnd_size, enum data_direction data_dir,
+		unsigned int mask, int needs_medium, const char *name)
+{
+	if (common->curlun)
+		common->data_size_from_cmnd <<= common->curlun->blkbits;
+	return check_command(common, cmnd_size, data_dir,
+			mask, needs_medium, name);
+}
+
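+/*
+ * Deferred handler for the OPENDL/CLOSEDL commands: it records the requested
+ * boot mode in flash (except on recovery builds) and notifies the USB stack
+ * to switch between download and user mode.
+ */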
+static int do_DL_usbmode(struct work_struct *data)
+{
+	struct fsg_common *com = container_of(data, struct fsg_common, work);
+
+	if (com->cmnd[0] == OPENDL) {
+#ifndef CONFIG_SYSTEM_RECOVERY
+		zDrvNand_WriteBootflag(0);
+#endif
+		usb_notify_up(USB_SWITCH_DEBUG, NULL);
+	} else if (com->cmnd[0] == CLOSEDL_switch_to_USER) {
+#ifndef CONFIG_SYSTEM_RECOVERY
+		zDrvNand_WriteBootflag(1);
+#endif
+		usb_notify_up(USB_SWITCH_USER, NULL);
+	}
+	return 1;
+}
+extern int get_usb_enum_mode(void);
+
+static int do_scsi_command(struct fsg_common *common)
+{
+	struct fsg_buffhd	*bh;
+	int			rc;
+	int			reply = -EINVAL;
+	int			i;
+	static char		unknown[16];
+
+	dump_cdb(common);
+
+	/* Wait for the next buffer to become available for data or status */
+	bh = common->next_buffhd_to_fill;
+	common->next_buffhd_to_drain = bh;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+	common->phase_error = 0;
+	common->short_packet_received = 0;
+
+	down_read(&common->filesem);	/* We're using the backing file */
+	if (common->cmnd[0] == READ_6 || common->cmnd[0] == READ_10 ||
+	    common->cmnd[0] == READ_12 || common->cmnd[0] == WRITE_6 ||
+	    common->cmnd[0] == WRITE_10 || common->cmnd[0] == WRITE_12) {
+		/* Run bulk data commands at normal priority */
+		if (sch_param.sched_priority != 0) {
+			sch_param.sched_priority = 0;
+			sched_setscheduler(current, SCHED_NORMAL, &sch_param);
+		}
+	}
+	switch (common->cmnd[0]) {
+
+	case OPENDL:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 6, DATA_DIR_NONE,
+				0, 1,
+				"OPENDL");
+		common->curlun->sense_data = SS_NO_SENSE;
+		if (get_usb_enum_mode() != 1)
+			schedule_work(&common->work);
+		else
+			printk("opening DL is forbidden in user mode\n");
+		break;
+
+	case CLOSEDL_switch_to_USER:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 6, DATA_DIR_NONE,
+				0, 1,
+				"CLOSEDL");
+		common->curlun->sense_data = SS_NO_SENSE;
+		if (get_usb_enum_mode() != 1)
+			schedule_work(&common->work);
+		else
+			printk("closing DL is forbidden in user mode\n");
+		break;
+		
+	case INQUIRY:
+		common->data_size_from_cmnd = common->cmnd[4];
+		reply = check_command(common, 6, DATA_DIR_TO_HOST,
+				      (1<<4), 0,
+				      "INQUIRY");
+		//USBSTACK_DBG("fsg inquiry ret: %d", reply);
+		if (reply == 0)
+			reply = do_inquiry(common, bh);
+		break;
+
+	case MODE_SELECT:
+		common->data_size_from_cmnd = common->cmnd[4];
+		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
+				      (1<<1) | (1<<4), 0,
+				      "MODE SELECT(6)");
+		if (reply == 0)
+			reply = do_mode_select(common, bh);
+		break;
+
+	case MODE_SELECT_10:
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_FROM_HOST,
+				      (1<<1) | (3<<7), 0,
+				      "MODE SELECT(10)");
+		if (reply == 0)
+			reply = do_mode_select(common, bh);
+		break;
+
+	case MODE_SENSE:
+		common->data_size_from_cmnd = common->cmnd[4];
+		reply = check_command(common, 6, DATA_DIR_TO_HOST,
+				      (1<<1) | (1<<2) | (1<<4), 0,
+				      "MODE SENSE(6)");
+		if (reply == 0)
+			reply = do_mode_sense(common, bh);
+		break;
+
+	case MODE_SENSE_10:
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (1<<1) | (1<<2) | (3<<7), 0,
+				      "MODE SENSE(10)");
+		if (reply == 0)
+			reply = do_mode_sense(common, bh);
+		break;
+
+	case ALLOW_MEDIUM_REMOVAL:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 6, DATA_DIR_NONE,
+				      (1<<4), 0,
+				      "PREVENT-ALLOW MEDIUM REMOVAL");
+		if (reply == 0)
+			reply = do_prevent_allow(common);
+		break;
+
+	case READ_6:
+		i = common->cmnd[4];
+		common->data_size_from_cmnd = (i == 0) ? 256 : i;
+		reply = check_command_size_in_blocks(common, 6,
+				      DATA_DIR_TO_HOST,
+				      (7<<1) | (1<<4), 1,
+				      "READ(6)");
+		if (reply == 0)
+			reply = do_read(common);
+		break;
+
+	case READ_10:
+		common->data_size_from_cmnd =
+				get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command_size_in_blocks(common, 10,
+				      DATA_DIR_TO_HOST,
+				      (1<<1) | (0xf<<2) | (3<<7), 1,
+				      "READ(10)");
+		if (reply == 0)
+			reply = do_read(common);
+		break;
+
+	case READ_12:
+		common->data_size_from_cmnd =
+				get_unaligned_be32(&common->cmnd[6]);
+		reply = check_command_size_in_blocks(common, 12,
+				      DATA_DIR_TO_HOST,
+				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
+				      "READ(12)");
+		if (reply == 0)
+			reply = do_read(common);
+		break;
+
+	case READ_CAPACITY:
+		common->data_size_from_cmnd = 8;
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (0xf<<2) | (1<<8), 1,
+				      "READ CAPACITY");
+		if (reply == 0)
+			reply = do_read_capacity(common, bh);
+		break;
+
+	case READ_HEADER:
+		if (!common->curlun || !common->curlun->cdrom)
+			goto unknown_cmnd;
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (3<<7) | (0x1f<<1), 1,
+				      "READ HEADER");
+		if (reply == 0)
+			reply = do_read_header(common, bh);
+		break;
+
+	case READ_TOC:
+		if (!common->curlun || !common->curlun->cdrom)
+			goto unknown_cmnd;
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      ~0x0, 1,	/* was: (7<<6) | (1<<1), 1 */
+				      "READ TOC");
+		if (reply == 0)
+			reply = do_read_toc(common, bh);
+		break;
+
+	case READ_FORMAT_CAPACITIES:
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (3<<7), 1,
+				      "READ FORMAT CAPACITIES");
+		if (reply == 0)
+			reply = do_read_format_capacities(common, bh);
+		break;
+
+	case REQUEST_SENSE:
+		common->data_size_from_cmnd = common->cmnd[4];
+		reply = check_command(common, 6, DATA_DIR_TO_HOST,
+				      (1<<4), 0,
+				      "REQUEST SENSE");
+		if (reply == 0)
+			reply = do_request_sense(common, bh);
+		break;
+
+	case START_STOP:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 6, DATA_DIR_NONE,
+				      (1<<1) | (1<<4), 0,
+				      "START-STOP UNIT");
+		if (reply == 0)
+			reply = do_start_stop(common);
+		break;
+
+	case SYNCHRONIZE_CACHE:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 10, DATA_DIR_NONE,
+				      (0xf<<2) | (3<<7), 1,
+				      "SYNCHRONIZE CACHE");
+		if (reply == 0)
+			reply = do_synchronize_cache(common);
+		break;
+
+	case TEST_UNIT_READY:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 6, DATA_DIR_NONE,
+				0, 1,
+				"TEST UNIT READY");
+		break;
+
+	/*
+	 * Although optional, this command is used by MS-Windows.  We
+	 * support a minimal version: BytChk must be 0.
+	 */
+	case VERIFY:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 10, DATA_DIR_NONE,
+				      (1<<1) | (0xf<<2) | (3<<7), 1,
+				      "VERIFY");
+		if (reply == 0)
+			reply = do_verify(common);
+		break;
+
+	case WRITE_6:
+		i = common->cmnd[4];
+		common->data_size_from_cmnd = (i == 0) ? 256 : i;
+		reply = check_command_size_in_blocks(common, 6,
+				      DATA_DIR_FROM_HOST,
+				      (7<<1) | (1<<4), 1,
+				      "WRITE(6)");
+		if (reply == 0)
+			reply = do_write(common);
+		break;
+
+	case WRITE_10:
+		common->data_size_from_cmnd =
+				get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command_size_in_blocks(common, 10,
+				      DATA_DIR_FROM_HOST,
+				      (1<<1) | (0xf<<2) | (3<<7), 1,
+				      "WRITE(10)");
+		if (reply == 0)
+			reply = do_write(common);
+		break;
+
+	case WRITE_12:
+		common->data_size_from_cmnd =
+				get_unaligned_be32(&common->cmnd[6]);
+		reply = check_command_size_in_blocks(common, 12,
+				      DATA_DIR_FROM_HOST,
+				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
+				      "WRITE(12)");
+		if (reply == 0)
+			reply = do_write(common);
+		break;
+
+	/*
+	 * Some mandatory commands that we recognize but don't implement.
+	 * They don't mean much in this setting.  It's left as an exercise
+	 * for anyone interested to implement RESERVE and RELEASE in terms
+	 * of Posix locks.
+	 */
+	case FORMAT_UNIT:
+	case RELEASE:
+	case RESERVE:
+	case SEND_DIAGNOSTIC:
+		/* Fall through */
+
+	default:
+unknown_cmnd:
+		common->data_size_from_cmnd = 0;
+		sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
+		reply = check_command(common, common->cmnd_size,
+				      DATA_DIR_UNKNOWN, ~0, 0, unknown);
+		if (reply == 0 && common->curlun != NULL) {
+			common->curlun->sense_data = SS_INVALID_COMMAND;
+			reply = -EINVAL;
+		}
+		break;
+	}
+	up_read(&common->filesem);
+	/* Keep the file-storage thread at SCHED_FIFO priority 37 */
+	if (sch_param.sched_priority != 37) {
+		sch_param.sched_priority = 37;
+		sched_setscheduler(current, SCHED_FIFO, &sch_param);
+	}
+
+	if (reply == -EINTR || signal_pending(current))
+		return -EINTR;
+
+	/* Set up the single reply buffer for finish_reply() */
+	if (reply == -EINVAL)
+		reply = 0;		/* Error reply length */
+	if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
+		reply = min((u32)reply, common->data_size_from_cmnd);
+		bh->inreq->length = reply;
+		bh->state = BUF_STATE_FULL;
+		common->residue -= reply;
+	}				/* Otherwise it's already set */
+
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
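+/*
+ * Validate a received Bulk-Only CBW: check the wrapper length and
+ * signature, sanity-check the LUN, flags and CDB length, then copy the
+ * CDB and transfer parameters into the common state for do_scsi_command().
+ */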
+static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct usb_request	*req = bh->outreq;
+	struct bulk_cb_wrap	*cbw = req->buf;
+	struct fsg_common	*common = fsg->common;
+
+	/* Was this a real packet?  Should it be ignored? */
+	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags) ||
+	    req->actual == 0)
+		return -EINVAL;
+
+	/* Is the CBW valid? */
+	if (req->actual != US_BULK_CB_WRAP_LEN ||
+			cbw->Signature != cpu_to_le32(
+				US_BULK_CB_SIGN)) {
+		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
+				req->actual,
+				le32_to_cpu(cbw->Signature));
+
+		/*
+		 * The Bulk-only spec says we MUST stall the IN endpoint
+		 * (6.6.1), so it's unavoidable.  It also says we must
+		 * retain this state until the next reset, but there's
+		 * no way to tell the controller driver it should ignore
+		 * Clear-Feature(HALT) requests.
+		 *
+		 * We aren't required to halt the OUT endpoint; instead
+		 * we can simply accept and discard any data received
+		 * until the next reset.
+		 */
+		wedge_bulk_in_endpoint(fsg);
+		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
+		return -EINVAL;
+	}
+
+	/* Is the CBW meaningful? */
+	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN ||
+			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
+		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
+				"cmdlen %u\n",
+				cbw->Lun, cbw->Flags, cbw->Length);
+
+		/*
+		 * We can do anything we want here, so let's stall the
+		 * bulk pipes if we are allowed to.
+		 */
+		if (common->can_stall) {
+			fsg_set_halt(fsg, fsg->bulk_out);
+			halt_bulk_in_endpoint(fsg);
+		}
+		return -EINVAL;
+	}
+
+	/* Save the command for later */
+	common->cmnd_size = cbw->Length;
+	memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
+	if (cbw->Flags & US_BULK_FLAG_IN)
+		common->data_dir = DATA_DIR_TO_HOST;
+	else
+		common->data_dir = DATA_DIR_FROM_HOST;
+	common->data_size = le32_to_cpu(cbw->DataTransferLength);
+	if (common->data_size == 0)
+		common->data_dir = DATA_DIR_NONE;
+	common->lun = cbw->Lun;
+	if (common->lun < common->nluns)
+		common->curlun = &common->luns[common->lun];
+	else
+		common->curlun = NULL;
+	common->tag = cbw->Tag;
+	return 0;
+}
+
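+/*
+ * Queue a bulk-OUT transfer for the next CBW, wait for it to complete,
+ * and hand the wrapper to received_cbw() for validation.
+ */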
+static int get_next_command(struct fsg_common *common)
+{
+	struct fsg_buffhd	*bh;
+	int			rc = 0;
+
+	/* Wait for the next buffer to become available */
+	bh = common->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+
+	/* Queue a request to read a Bulk-only CBW */
+	set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN);
+	if (!start_out_transfer(common, bh))
+		/* Don't know what to do if common->fsg is NULL */
+		return -EIO;
+
+	/*
+	 * We will drain the buffer in software, which means we
+	 * can reuse it for the next filling.  No need to advance
+	 * next_buffhd_to_fill.
+	 */
+
+	/* Wait for the CBW to arrive */
+	while (bh->state != BUF_STATE_FULL) {
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+	smp_rmb();
+	rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
+	bh->state = BUF_STATE_EMPTY;
+
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
+		struct usb_request **preq)
+{
+	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
+	if (*preq)
+		return 0;
+	ERROR(common, "can't allocate request for %s\n", ep->name);
+	return -ENOMEM;
+}
+
+/* Reset interface setting and re-init endpoint state (toggle etc). */
+static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
+{
+	struct fsg_dev *fsg;
+	int i, rc = 0;
+
+	if (common->running)
+		DBG(common, "reset interface\n");
+
+reset:
+	/* Deallocate the requests */
+	if (common->fsg) {
+		fsg = common->fsg;
+
+		for (i = 0; i < fsg_num_buffers; ++i) {
+			struct fsg_buffhd *bh = &common->buffhds[i];
+
+			if (bh->inreq) {
+				usb_ep_free_request(fsg->bulk_in, bh->inreq);
+				bh->inreq = NULL;
+			}
+			if (bh->outreq) {
+				usb_ep_free_request(fsg->bulk_out, bh->outreq);
+				bh->outreq = NULL;
+			}
+		}
+
+		/* Disable the endpoints */
+		if (fsg->bulk_in_enabled) {
+			usb_ep_disable(fsg->bulk_in);
+			fsg->bulk_in_enabled = 0;
+		}
+		if (fsg->bulk_out_enabled) {
+			usb_ep_disable(fsg->bulk_out);
+			fsg->bulk_out_enabled = 0;
+		}
+
+		common->fsg = NULL;
+		wake_up(&common->fsg_wait);
+	}
+
+	common->running = 0;
+	if (!new_fsg || rc)
+		return rc;
+
+	common->fsg = new_fsg;
+	fsg = common->fsg;
+
+	/* Enable the endpoints */
+	rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in);
+	if (rc)
+		goto reset;
+	rc = usb_ep_enable(fsg->bulk_in);
+	if (rc)
+		goto reset;
+	fsg->bulk_in->driver_data = common;
+	fsg->bulk_in_enabled = 1;
+
+	rc = config_ep_by_speed(common->gadget, &(fsg->function),
+				fsg->bulk_out);
+	if (rc)
+		goto reset;
+	rc = usb_ep_enable(fsg->bulk_out);
+	if (rc)
+		goto reset;
+	fsg->bulk_out->driver_data = common;
+	fsg->bulk_out_enabled = 1;
+	common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc);
+	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
+
+	/* Allocate the requests */
+	for (i = 0; i < fsg_num_buffers; ++i) {
+		struct fsg_buffhd	*bh = &common->buffhds[i];
+
+		rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
+		if (rc)
+			goto reset;
+		rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
+		if (rc)
+			goto reset;
+		bh->inreq->buf = bh->outreq->buf = bh->buf;
+		bh->inreq->context = bh->outreq->context = bh;
+		bh->inreq->complete = bulk_in_complete;
+		bh->outreq->complete = bulk_out_complete;
+	}
+
+	common->running = 1;
+	for (i = 0; i < common->nluns; ++i)
+		common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
+	return rc;
+}
+
+
+/****************************** ALT CONFIGS ******************************/
+
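+/*
+ * Set_Interface is handled asynchronously by the storage thread: raise a
+ * CONFIG_CHANGE exception and return USB_GADGET_DELAYED_STATUS so the
+ * status stage completes only after do_set_interface() has run.
+ */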
+static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct fsg_dev *fsg = fsg_from_func(f);
+
+#ifdef CONFIG_PM
+	/* Leaving suspend via a Set_Interface: clear the suspend flag */
+	if (fsg->suspend_state == 1)
+		fsg->suspend_state = 0;
+#endif
+
+	fsg->common->new_fsg = fsg;
+	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+	return USB_GADGET_DELAYED_STATUS;
+}
+
+static void fsg_disable(struct usb_function *f)
+{
+	struct fsg_dev *fsg = fsg_from_func(f);
+#if 0
+	if(fsg->suspend_state == 1)
+		return;
+#endif	
+	fsg->common->new_fsg = NULL;
+	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+}
+
+#ifdef CONFIG_PM
+unsigned int g_fsg_suspend_cnt = 0;
+unsigned int g_fsg_resume_cnt = 0;
+static void fsg_suspend(struct usb_function *f)
+{
+	struct fsg_dev *fsg = fsg_from_func(f);
+
+	g_fsg_suspend_cnt++;
+	usb_printk("%s, %u, suspendcnt:%d\n", __func__, __LINE__,
+		   g_fsg_suspend_cnt);
+	USBSTACK_DBG("%s, %u suspendcnt:%d", __func__, __LINE__,
+		     g_fsg_suspend_cnt);
+	fsg->suspend_state = 1;
+	/* Only bulk-IN is disabled on suspend; bulk-OUT is left enabled */
+	usb_ep_disable(fsg->bulk_in);
+	fsg->bulk_in_enabled = 0;
+}
+static void fsg_resume(struct usb_function *f)
+{
+	struct fsg_dev *fsg = fsg_from_func(f);
+
+	g_fsg_resume_cnt++;
+	usb_printk("%s, %u, resumecnt:%d\n", __func__, __LINE__,
+		   g_fsg_resume_cnt);
+	USBSTACK_DBG("%s, %u resumecnt:%d", __func__, __LINE__,
+		     g_fsg_resume_cnt);
+	/* Re-enable the bulk-IN endpoint that fsg_suspend() disabled */
+	usb_ep_resume_enable(fsg->bulk_in);
+	fsg->bulk_in_enabled = 1;
+	fsg->suspend_state = 0;
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
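+/*
+ * Exception handler for the storage thread: drain pending signals, cancel
+ * outstanding bulk transfers, reset the buffer and SCSI state, and then
+ * perform whatever action the raised exception requires.
+ */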
+static void handle_exception(struct fsg_common *common)
+{
+	siginfo_t		info;
+	int			i;
+	struct fsg_buffhd	*bh;
+	enum fsg_state		old_state;
+	struct fsg_lun		*curlun;
+	unsigned int		exception_req_tag;
+
+	/*
+	 * Clear the existing signals.  Anything but SIGUSR1 is converted
+	 * into a high-priority EXIT exception.
+	 */
+	for (;;) {
+		int sig =
+			dequeue_signal_lock(current, &current->blocked, &info);
+		if (!sig)
+			break;
+		if (sig != SIGUSR1) {
+			if (common->state < FSG_STATE_EXIT)
+				DBG(common, "Main thread exiting on signal\n");
+			raise_exception(common, FSG_STATE_EXIT);
+		}
+	}
+
+	/* Cancel all the pending transfers */
+	if (likely(common->fsg)) {
+		for (i = 0; i < fsg_num_buffers; ++i) {
+			bh = &common->buffhds[i];
+			if (bh->inreq_busy)
+				usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
+			if (bh->outreq_busy)
+				usb_ep_dequeue(common->fsg->bulk_out,
+					       bh->outreq);
+		}
+
+		/* Wait until everything is idle */
+		for (;;) {
+			int num_active = 0;
+			for (i = 0; i < fsg_num_buffers; ++i) {
+				bh = &common->buffhds[i];
+				num_active += bh->inreq_busy + bh->outreq_busy;
+			}
+			if (num_active == 0)
+				break;
+			if (sleep_thread(common))
+				return;
+		}
+
+		/* Clear out the controller's fifos */
+		if (common->fsg->bulk_in_enabled)
+			usb_ep_fifo_flush(common->fsg->bulk_in);
+		if (common->fsg->bulk_out_enabled)
+			usb_ep_fifo_flush(common->fsg->bulk_out);
+	}
+
+	/*
+	 * Reset the I/O buffer states and pointers, the SCSI
+	 * state, and the exception.  Then invoke the handler.
+	 */
+	spin_lock_irq(&common->lock);
+
+	for (i = 0; i < fsg_num_buffers; ++i) {
+		bh = &common->buffhds[i];
+		bh->state = BUF_STATE_EMPTY;
+	}
+	common->next_buffhd_to_fill = &common->buffhds[0];
+	common->next_buffhd_to_drain = &common->buffhds[0];
+	exception_req_tag = common->exception_req_tag;
+	old_state = common->state;
+
+	if (old_state == FSG_STATE_ABORT_BULK_OUT)
+		common->state = FSG_STATE_STATUS_PHASE;
+	else {
+		for (i = 0; i < common->nluns; ++i) {
+			curlun = &common->luns[i];
+			curlun->prevent_medium_removal = 0;
+			curlun->sense_data = SS_NO_SENSE;
+			curlun->unit_attention_data = SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+		common->state = FSG_STATE_IDLE;
+	}
+	spin_unlock_irq(&common->lock);
+
+	/* Carry out any extra actions required for the exception */
+	switch (old_state) {
+	case FSG_STATE_ABORT_BULK_OUT:
+		send_status(common);
+		spin_lock_irq(&common->lock);
+		if (common->state == FSG_STATE_STATUS_PHASE)
+			common->state = FSG_STATE_IDLE;
+		spin_unlock_irq(&common->lock);
+		break;
+
+	case FSG_STATE_RESET:
+		/*
+		 * In case we were forced against our will to halt a
+		 * bulk endpoint, clear the halt now.  (The SuperH UDC
+		 * requires this.)
+		 */
+		if (!fsg_is_set(common))
+			break;
+		if (test_and_clear_bit(IGNORE_BULK_OUT,
+				       &common->fsg->atomic_bitflags))
+			usb_ep_clear_halt(common->fsg->bulk_in);
+
+		if (common->ep0_req_tag == exception_req_tag)
+			ep0_queue(common);	/* Complete the status stage */
+
+		/*
+		 * Technically this should go here, but it would only be
+		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
+		 * CONFIG_CHANGE cases.
+		 */
+		/* for (i = 0; i < common->nluns; ++i) */
+		/*	common->luns[i].unit_attention_data = */
+		/*		SS_RESET_OCCURRED;  */
+		break;
+
+	case FSG_STATE_CONFIG_CHANGE:
+		do_set_interface(common, common->new_fsg);
+		if (common->new_fsg)
+			usb_composite_setup_continue(common->cdev);
+		break;
+
+	case FSG_STATE_EXIT:
+	case FSG_STATE_TERMINATED:
+		do_set_interface(common, NULL);		/* Free resources */
+		spin_lock_irq(&common->lock);
+		common->state = FSG_STATE_TERMINATED;	/* Stop the thread */
+		spin_unlock_irq(&common->lock);
+		break;
+
+	case FSG_STATE_INTERFACE_CHANGE:
+	case FSG_STATE_DISCONNECT:
+	case FSG_STATE_COMMAND_PHASE:
+	case FSG_STATE_DATA_PHASE:
+	case FSG_STATE_STATUS_PHASE:
+	case FSG_STATE_IDLE:
+		break;
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
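+/*
+ * The main file-storage thread.  Each loop iteration services one SCSI
+ * command: fetch a CBW, execute the command, finish the data phase, and
+ * send the CSW status before going idle again.
+ */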
+static int fsg_main_thread(void *common_)
+{
+	struct fsg_common	*common = common_;
+
+	/*
+	 * Allow the thread to be killed by a signal, but set the signal mask
+	 * to block everything but INT, TERM, KILL, and USR1.
+	 */
+	allow_signal(SIGINT);
+	allow_signal(SIGTERM);
+	allow_signal(SIGKILL);
+	allow_signal(SIGUSR1);
+
+	/* Allow the thread to be frozen */
+	set_freezable();
+
+	/*
+	 * Arrange for userspace references to be interpreted as kernel
+	 * pointers.  That way we can pass a kernel pointer to a routine
+	 * that expects a __user pointer and it will work okay.
+	 */
+	set_fs(get_ds());
+	/* Run the file-storage thread at real-time priority */
+	sch_param.sched_priority = 37;
+	sched_setscheduler(current, SCHED_FIFO, &sch_param);
+
+	/* The main loop */
+	while (common->state != FSG_STATE_TERMINATED) {
+		if (exception_in_progress(common) || signal_pending(current)) {
+			handle_exception(common);
+			continue;
+		}
+
+		if (!common->running) {
+			sleep_thread(common);
+			continue;
+		}
+
+		if (get_next_command(common))
+			continue;
+
+		spin_lock_irq(&common->lock);
+		if (!exception_in_progress(common))
+			common->state = FSG_STATE_DATA_PHASE;
+		spin_unlock_irq(&common->lock);
+
+		if (do_scsi_command(common) || finish_reply(common))
+			continue;
+
+		spin_lock_irq(&common->lock);
+		if (!exception_in_progress(common))
+			common->state = FSG_STATE_STATUS_PHASE;
+		spin_unlock_irq(&common->lock);
+
+		if (send_status(common))
+			continue;
+
+		/* Vendor hook related to CD-ROM ejection (original note: "add mods eject cdrom") */
+		usb_is_reject_cdrom();
+
+
+		spin_lock_irq(&common->lock);
+		if (!exception_in_progress(common))
+			common->state = FSG_STATE_IDLE;
+		spin_unlock_irq(&common->lock);
+#if 0
+		if(usb_is_reject_cdrom()){
+			msleep(100);
+		}
+#endif
+	}
+
+	spin_lock_irq(&common->lock);
+	common->thread_task = NULL;
+	spin_unlock_irq(&common->lock);
+
+	if (!common->ops || !common->ops->thread_exits
+	 || common->ops->thread_exits(common) < 0) {
+		struct fsg_lun *curlun = common->luns;
+		unsigned i = common->nluns;
+
+		down_write(&common->filesem);
+		for (; i--; ++curlun) {
+			if (!fsg_lun_is_open(curlun))
+				continue;
+
+			fsg_lun_close(curlun);
+			curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+		}
+		up_write(&common->filesem);
+	}
+
+	/* Let fsg_unbind() know the thread has exited */
+	complete_and_exit(&common->thread_notifier, 0);
+}
+
+
+/*************************** DEVICE ATTRIBUTES ***************************/
+
+/* Write permission is checked per LUN in store_*() functions. */
+static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
+static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, fsg_store_nofua);
+static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
+static DEVICE_ATTR(inquiry, 0644, fsg_show_inquiry, fsg_store_inquiry);
+
+static DEVICE_ATTR(cdrom, 0644, fsg_show_cdrom, fsg_store_cdrom);
+
+/****************************** FSG COMMON ******************************/
+
+static void fsg_common_release(struct kref *ref);
+
+static void fsg_lun_release(struct device *dev)
+{
+	/* Nothing needs to be done */
+}
+
+static inline void fsg_common_get(struct fsg_common *common)
+{
+	kref_get(&common->ref);
+}
+
+static inline void fsg_common_put(struct fsg_common *common)
+{
+	kref_put(&common->ref, fsg_common_release);
+}
+
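+/*
+ * Build (or re-initialise) the shared fsg_common state: validate the
+ * configuration, create the LUNs and their sysfs attributes, link the data
+ * buffers into a ring, and start the file-storage worker thread.
+ */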
+static struct fsg_common *fsg_common_init(struct fsg_common *common,
+					  struct usb_composite_dev *cdev,
+					  struct fsg_config *cfg)
+{
+	struct usb_gadget *gadget = cdev->gadget;
+	struct fsg_buffhd *bh;
+	struct fsg_lun *curlun;
+	struct fsg_lun_config *lcfg;
+	int nluns, i, rc;
+	char *pathbuf;
+
+	rc = fsg_num_buffers_validate();
+	if (rc != 0)
+		return ERR_PTR(rc);
+
+	/* Find out how many LUNs there should be */
+	nluns = cfg->nluns;
+	if (nluns < 1 || nluns > FSG_MAX_LUNS) {
+		dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns);
+		return ERR_PTR(-EINVAL);
+	}
+	
+	sch_param.sched_priority = 37;
+
+	/* Allocate? */
+	if (!common) {
+		common = kzalloc(sizeof *common, GFP_KERNEL);
+		if (!common)
+			return ERR_PTR(-ENOMEM);
+		common->free_storage_on_release = 1;
+	} else {
+		memset(common, 0, sizeof *common);
+		common->free_storage_on_release = 0;
+	}
+
+	common->buffhds = kcalloc(fsg_num_buffers,
+				  sizeof *(common->buffhds), GFP_KERNEL);
+	if (!common->buffhds) {
+		if (common->free_storage_on_release)
+			kfree(common);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	common->ops = cfg->ops;
+	common->private_data = cfg->private_data;
+	if (gadget == NULL) {
+		usb_printk("#### no gadget obj\n");
+		kfree(common->buffhds);
+		if (common->free_storage_on_release)
+			kfree(common);
+		return ERR_PTR(-EINVAL);
+	}
+
+	usb_printk("#### gadget name:%s\n", gadget->name);
+	common->gadget = gadget;
+	common->ep0 = gadget->ep0;
+	common->ep0req = cdev->req;
+	common->cdev = cdev;
+
+
+	/* Maybe allocate device-global string IDs, and patch descriptors */
+	if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
+		rc = usb_string_id(cdev);
+		if (unlikely(rc < 0))
+			goto error_release;
+		fsg_strings[FSG_STRING_INTERFACE].id = rc;
+		fsg_intf_desc.iInterface = rc;
+	}
+
+	/*
+	 * Create the LUNs, open their backing files, and register the
+	 * LUN devices in sysfs.
+	 */
+	curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
+	if (unlikely(!curlun)) {
+		rc = -ENOMEM;
+		goto error_release;
+	}
+	common->luns = curlun;
+
+	init_rwsem(&common->filesem);
+
+	for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) {
+		curlun->cdrom = !!lcfg->cdrom;
+		curlun->ro = lcfg->cdrom || lcfg->ro;
+		curlun->initially_ro = curlun->ro;
+		curlun->removable = lcfg->removable;
+		curlun->dev.release = fsg_lun_release;
+		curlun->dev.parent = &gadget->dev;
+		/* curlun->dev.driver = &fsg_driver.driver; XXX */
+		dev_set_drvdata(&curlun->dev, &common->filesem);
+		dev_set_name(&curlun->dev,
+			     cfg->lun_name_format
+			   ? cfg->lun_name_format
+			   : "lun%d",
+			     i);
+
+		snprintf(curlun->inquiry_string, sizeof curlun->inquiry_string,
+			 "%-8s%-16s%4s",  "DEMO",
+			 (curlun->cdrom ? "USB SCSI CD-ROM" : "MMC Storage"),
+			 "2.31");
+
+		rc = device_register(&curlun->dev);
+		if (rc) {
+			INFO(common, "failed to register LUN%d: %d\n", i, rc);
+			common->nluns = i;
+			put_device(&curlun->dev);
+			goto error_release;
+		}
+
+		rc = device_create_file(&curlun->dev, &dev_attr_ro);
+		if (rc)
+			goto error_luns;
+		rc = device_create_file(&curlun->dev, &dev_attr_file);
+		if (rc)
+			goto error_luns;
+		rc = device_create_file(&curlun->dev, &dev_attr_nofua);
+		if (rc)
+			goto error_luns;
+		rc = device_create_file(&curlun->dev, &dev_attr_inquiry);
+		if (rc)
+			goto error_luns;
+		rc = device_create_file(&curlun->dev, &dev_attr_cdrom);
+		if (rc)
+			goto error_luns;
+
+
+		if (lcfg->filename) {
+			rc = fsg_lun_open(curlun, lcfg->filename);
+			if (rc)
+				goto error_luns;
+		} else if (!curlun->removable) {
+			ERROR(common, "no file given for LUN%d\n", i);
+			rc = -EINVAL;
+			goto error_luns;
+		}
+	}
+	common->nluns = nluns;
+
+	/* Data buffers cyclic list */
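+	/*
+	 * Link the buffer heads into a ring.  The goto skips the
+	 * "bh->next = bh + 1" step on the first iteration, and the
+	 * assignment after the loop closes the ring back to buffhds[0].
+	 */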
+	bh = common->buffhds;
+	i = fsg_num_buffers;
+	goto buffhds_first_it;
+	do {
+		bh->next = bh + 1;
+		++bh;
+buffhds_first_it:
+		bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
+		if (unlikely(!bh->buf)) {
+			rc = -ENOMEM;
+			goto error_release;
+		}
+	} while (--i);
+	bh->next = common->buffhds;
+
+	/* Prepare inquiryString */
+	if (cfg->release != 0xffff) {
+		i = cfg->release;
+	} else {
+		i = usb_gadget_controller_number(gadget);
+		if (i >= 0) {
+			i = 0x0300 + i;
+		} else {
+			usb_printk("controller '%s' not recognized\n",
+				   gadget->name);
+			WARNING(common, "controller '%s' not recognized\n",
+				gadget->name);
+			i = 0x0399;
+		}
+	}
+	snprintf(common->inquiry_string, sizeof common->inquiry_string,
+		 "%-8s%-16s%04x", cfg->vendor_name ?: "Linux",
+		 /* Assume product name dependent on the first LUN */
+		 cfg->product_name ?: (common->luns->cdrom
+				     ? "File-Stor Gadget"
+				     : "File-CD Gadget"),
+		 i);
+
+	/*
+	 * Some peripheral controllers are known not to be able to
+	 * halt bulk endpoints correctly.  If one of them is present,
+	 * disable stalls.
+	 */
+	common->can_stall = cfg->can_stall &&
+		!(gadget_is_at91(common->gadget));
+
+	spin_lock_init(&common->lock);
+	kref_init(&common->ref);
+
+	/* Tell the thread to start working */
+	common->thread_task =
+		kthread_create(fsg_main_thread, common,
+			       cfg->thread_name ?: "file-storage");
+	if (IS_ERR(common->thread_task)) {
+		rc = PTR_ERR(common->thread_task);
+		goto error_release;
+	}
+	init_completion(&common->thread_notifier);
+	init_waitqueue_head(&common->fsg_wait);
+	INIT_WORK(&common->work, do_DL_usbmode);
+	/* Information */
+	INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
+	INFO(common, "Number of LUNs=%d\n", common->nluns);
+
+	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+	for (i = 0, nluns = common->nluns, curlun = common->luns;
+	     i < nluns;
+	     ++curlun, ++i) {
+		char *p = "(no medium)";
+
+		if (fsg_lun_is_open(curlun)) {
+			p = "(error)";
+			if (pathbuf) {
+				p = d_path(&curlun->filp->f_path,
+					   pathbuf, PATH_MAX);
+				if (IS_ERR(p))
+					p = "(error)";
+			}
+		}
+		LINFO(curlun, "LUN: %s%s%sfile: %s\n",
+		      curlun->removable ? "removable " : "",
+		      curlun->ro ? "read only " : "",
+		      curlun->cdrom ? "CD-ROM " : "",
+		      p);
+	}
+	kfree(pathbuf);
+
+	DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
+
+	wake_up_process(common->thread_task);
+
+	return common;
+
+error_luns:
+	common->nluns = i + 1;
+error_release:
+	common->state = FSG_STATE_TERMINATED;	/* The thread is dead */
+	/* Call fsg_common_release() directly; the ref might not be initialised. */
+	fsg_common_release(&common->ref);
+	return ERR_PTR(rc);
+}
+
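+/* Undo fsg_common_init(): stop the thread, drop the LUNs, free the buffers. */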
+static void fsg_common_release(struct kref *ref)
+{
+	struct fsg_common *common = container_of(ref, struct fsg_common, ref);
+
+	/* If the thread isn't already dead, tell it to exit now */
+	if (common->state != FSG_STATE_TERMINATED) {
+		raise_exception(common, FSG_STATE_EXIT);
+		wait_for_completion(&common->thread_notifier);
+	}
+
+	if (likely(common->luns)) {
+		struct fsg_lun *lun = common->luns;
+		unsigned i = common->nluns;
+
+		/* In error recovery common->nluns may be zero. */
+		for (; i; --i, ++lun) {
+			device_remove_file(&lun->dev, &dev_attr_nofua);
+			device_remove_file(&lun->dev, &dev_attr_ro);
+			device_remove_file(&lun->dev, &dev_attr_file);
+			device_remove_file(&lun->dev, &dev_attr_cdrom);
+			fsg_lun_close(lun);
+			device_unregister(&lun->dev);
+		}
+
+		kfree(common->luns);
+	}
+
+	{
+		struct fsg_buffhd *bh = common->buffhds;
+		unsigned i = fsg_num_buffers;
+		do {
+			kfree(bh->buf);
+		} while (++bh, --i);
+	}
+
+	kfree(common->buffhds);
+	if (common->free_storage_on_release)
+		kfree(common);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	int i;
+	struct fsg_dev		*fsg = fsg_from_func(f);
+	struct fsg_common	*common = fsg->common;
+
+
+	USBSTACK_DBG("fsg_unbind");
+	DBG(fsg, "unbind\n");
+	if (fsg->common->fsg == fsg) {
+		fsg->common->new_fsg = NULL;
+		raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+		/* FIXME: make interruptible or killable somehow? */
+		wait_event(common->fsg_wait, common->fsg != fsg);
+	}
+
+
+	for (i = 0; i < fsg_num_buffers; ++i) {
+		struct fsg_buffhd *bh = &common->buffhds[i];
+
+		if (bh->inreq) {
+			usb_ep_free_request(fsg->bulk_in, bh->inreq);
+			bh->inreq = NULL;
+		}
+		if (bh->outreq) {
+			usb_ep_free_request(fsg->bulk_out, bh->outreq);
+			bh->outreq = NULL;
+		}
+	}
+
+	/* Disable the endpoints */
+	if (fsg->bulk_in_enabled) {
+		usb_ep_disable(fsg->bulk_in);
+		fsg->bulk_in_enabled = 0;
+	}
+	if (fsg->bulk_out_enabled) {
+		usb_ep_disable(fsg->bulk_out);
+		fsg->bulk_out_enabled = 0;
+	}
+
+	fsg->bulk_in->driver_data = NULL;	
+	fsg->bulk_out->driver_data = NULL;	
+
+	fsg_common_put(common);
+	usb_free_descriptors(fsg->function.descriptors);
+	usb_free_descriptors(fsg->function.hs_descriptors);
+	usb_free_descriptors(fsg->function.ss_descriptors);
+	kfree(fsg);
+}
+
+static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct fsg_dev		*fsg = fsg_from_func(f);
+	struct usb_gadget	*gadget = c->cdev->gadget;
+	int			i;
+	struct usb_ep		*ep;
+
+	fsg->gadget = gadget;
+
+	/* New interface */
+	i = usb_interface_id(c, f);
+	if (i < 0)
+		return i;
+	fsg_intf_desc.bInterfaceNumber = i;
+	fsg->interface_number = i;
+
+	/* Find all the endpoints we will use */
+	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
+	if (!ep)
+		goto autoconf_fail;
+	ep->driver_data = fsg->common;	/* claim the endpoint */
+	fsg->bulk_in = ep;
+
+	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
+	if (!ep)
+		goto autoconf_fail;
+	ep->driver_data = fsg->common;	/* claim the endpoint */
+	fsg->bulk_out = ep;
+
+	/* Copy descriptors */
+	f->descriptors = usb_copy_descriptors(fsg_fs_function);
+	if (unlikely(!f->descriptors))
+		return -ENOMEM;
+
+	if (gadget_is_dualspeed(gadget)) {
+		/* Assume endpoint addresses are the same for both speeds */
+		fsg_hs_bulk_in_desc.bEndpointAddress =
+			fsg_fs_bulk_in_desc.bEndpointAddress;
+		fsg_hs_bulk_out_desc.bEndpointAddress =
+			fsg_fs_bulk_out_desc.bEndpointAddress;
+		f->hs_descriptors = usb_copy_descriptors(fsg_hs_function);
+		if (unlikely(!f->hs_descriptors)) {
+			usb_free_descriptors(f->descriptors);
+			return -ENOMEM;
+		}
+	}
+
+	if (gadget_is_superspeed(gadget)) {
+		unsigned	max_burst;
+
+		/* Calculate bMaxBurst, we know packet size is 1024 */
+		max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15);
+
+		fsg_ss_bulk_in_desc.bEndpointAddress =
+			fsg_fs_bulk_in_desc.bEndpointAddress;
+		fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;
+
+		fsg_ss_bulk_out_desc.bEndpointAddress =
+			fsg_fs_bulk_out_desc.bEndpointAddress;
+		fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
+
+		f->ss_descriptors = usb_copy_descriptors(fsg_ss_function);
+		if (unlikely(!f->ss_descriptors)) {
+			usb_free_descriptors(f->hs_descriptors);
+			usb_free_descriptors(f->descriptors);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+
+autoconf_fail:
+	ERROR(fsg, "unable to autoconfigure all endpoints\n");
+	return -ENOTSUPP;
+}
+
+
+/****************************** ADD FUNCTION ******************************/
+
+static struct usb_gadget_strings *fsg_strings_array[] = {
+	&fsg_stringtab,
+	NULL,
+};
+
+static int fsg_bind_config(struct usb_composite_dev *cdev,
+			   struct usb_configuration *c,
+			   struct fsg_common *common)
+{
+	struct fsg_dev *fsg;
+	int rc;
+
+	fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
+	if (unlikely(!fsg))
+		return -ENOMEM;
+
+	fsg->function.name        = FSG_DRIVER_DESC;
+	fsg->function.strings     = fsg_strings_array;
+	fsg->function.bind        = fsg_bind;
+	fsg->function.unbind      = fsg_unbind;
+	fsg->function.setup       = fsg_setup;
+	fsg->function.set_alt     = fsg_set_alt;
+	fsg->function.disable     = fsg_disable;
+#ifdef CONFIG_PM
+	fsg->function.suspend = fsg_suspend;
+	fsg->function.resume = fsg_resume;
+#endif
+
+	fsg->common               = common;
+	/*
+	 * Our caller holds a reference to common structure so we
+	 * don't have to be worry about it being freed until we return
+	 * from this function.  So instead of incrementing counter now
+	 * and decrement in error recovery we increment it only when
+	 * call to usb_add_function() was successful.
+	 */
+
+	rc = usb_add_function(c, &fsg->function);
+	if (unlikely(rc))
+		kfree(fsg);
+	else
+		fsg_common_get(fsg->common);
+	return rc;
+}
+
+static inline int __deprecated __maybe_unused
+fsg_add(struct usb_composite_dev *cdev, struct usb_configuration *c,
+	struct fsg_common *common)
+{
+	return fsg_bind_config(cdev, c, common);
+}
+
+
+/************************* Module parameters *************************/
+
+struct fsg_module_parameters {
+	char		*file[FSG_MAX_LUNS];
+	bool		ro[FSG_MAX_LUNS];
+	bool		removable[FSG_MAX_LUNS];
+	bool		cdrom[FSG_MAX_LUNS];
+	bool		nofua[FSG_MAX_LUNS];
+
+	unsigned int	file_count, ro_count, removable_count, cdrom_count;
+	unsigned int	nofua_count;
+	unsigned int	luns;	/* nluns */
+	bool		stall;	/* can_stall */
+};
+
+#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc)	\
+	module_param_array_named(prefix ## name, params.name, type,	\
+				 &prefix ## params.name ## _count,	\
+				 S_IRUGO);				\
+	MODULE_PARM_DESC(prefix ## name, desc)
+
+#define _FSG_MODULE_PARAM(prefix, params, name, type, desc)		\
+	module_param_named(prefix ## name, params.name, type,		\
+			   S_IRUGO);					\
+	MODULE_PARM_DESC(prefix ## name, desc)
+
+#define FSG_MODULE_PARAMETERS(prefix, params)				\
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp,		\
+				"names of backing files or devices");	\
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool,		\
+				"true to force read-only");		\
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool,	\
+				"true to simulate removable media");	\
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool,		\
+				"true to simulate CD-ROM instead of disk"); \
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, nofua, bool,		\
+				"true to ignore SCSI WRITE(10,12) FUA bit"); \
+	_FSG_MODULE_PARAM(prefix, params, luns, uint,			\
+			  "number of LUNs");				\
+	_FSG_MODULE_PARAM(prefix, params, stall, bool,			\
+			  "false to prevent bulk stalls")
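+/*
+ * Usage sketch (illustrative, not part of this file): a gadget driver that
+ * includes this code typically declares
+ *
+ *	static struct fsg_module_parameters mod_data = { .stall = 1 };
+ *	FSG_MODULE_PARAMETERS(, mod_data);	// empty prefix
+ *
+ * and then calls fsg_common_from_params(NULL, cdev, &mod_data) at bind time.
+ */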
+
+static void
+fsg_config_from_params(struct fsg_config *cfg,
+		       const struct fsg_module_parameters *params)
+{
+	struct fsg_lun_config *lun;
+	unsigned i;
+
+	/* Configure LUNs */
+	cfg->nluns =
+		min(params->luns ?: (params->file_count ?: 1u),
+		    (unsigned)FSG_MAX_LUNS);
+	for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
+		lun->ro = !!params->ro[i];
+		lun->cdrom = !!params->cdrom[i];
+		lun->removable = /* Removable by default */
+			params->removable_count <= i || params->removable[i];
+		lun->filename =
+			params->file_count > i && params->file[i][0]
+			? params->file[i]
+			: 0;
+	}
+
+	/* Let MSF use defaults */
+	cfg->lun_name_format = 0;
+	cfg->thread_name = 0;
+	cfg->vendor_name = 0;
+	cfg->product_name = 0;
+	cfg->release = 0xffff;
+
+	cfg->ops = NULL;
+	cfg->private_data = NULL;
+
+	/* Finalise */
+	cfg->can_stall = params->stall;
+}
+
+static inline struct fsg_common *
+fsg_common_from_params(struct fsg_common *common,
+		       struct usb_composite_dev *cdev,
+		       const struct fsg_module_parameters *params)
+	__attribute__((unused));
+static inline struct fsg_common *
+fsg_common_from_params(struct fsg_common *common,
+		       struct usb_composite_dev *cdev,
+		       const struct fsg_module_parameters *params)
+{
+	struct fsg_config cfg;
+	fsg_config_from_params(&cfg, params);
+	return fsg_common_init(common, cdev, &cfg);
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_mbim.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_mbim.c
new file mode 100644
index 0000000..99980a4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_mbim.c
@@ -0,0 +1,3278 @@
+#include <linux/slab.h>

+#include <linux/kernel.h>

+#include <linux/device.h>

+#include <linux/etherdevice.h>

+#include <linux/list.h>

+#include <linux/atomic.h>

+#include <linux/miscdevice.h>

+#include <linux/vmalloc.h>

+#include <linux/crc32.h>

+#include <linux/if_vlan.h>

+#include <linux/usb/gadget.h>

+//#include <uapi/linux/sched.h>

+

+#include <linux/android_notify.h>

+

+#include <asm-generic/ioctl.h>

+

+#include "u_ether.h"

+#include "mbim.h"

+

+//#include "multi_packet.h"

+//#include "Sys-linux.c"

+#include <mach/highspeed_debug.h>

+

+

+#define  MBIM_CTRL_NAME  "android_mbim_ctrl"

+#define  MBIM_DATA_NAME  "android_mbim_data"

+

+#define MBIM_MAGIC  'M'

+/*
+ * ioctl commands for the userspace VNIC/MBIM control interface:
+ * buffer management, connect/disconnect, and status queries.
+ */

+#define IOCTL_VNIC_SET_BLOCKTIME    _IOW(MBIM_MAGIC,0,unsigned int)

+#define IOCTL_VNIC_FREE_READ_BUF    _IOW(MBIM_MAGIC,1,unsigned int)

+#define IOCTL_VNIC_GET_WRITE_BUF    _IOR(MBIM_MAGIC,2,unsigned int)

+#define IOCTL_VNIC_CONNECT          _IOW(MBIM_MAGIC,3,unsigned int)

+#define IOCTL_VNIC_DISCONNECT       _IOW(MBIM_MAGIC,4,unsigned int)

+#define IOCTL_VNIC_REG_CBK_XFER_STATISTICS      _IOR(MBIM_MAGIC,5,unsigned int)

+#define IOCTL_VNIC_REG_CBK_NIC_DISABLE          _IOW(MBIM_MAGIC,6,unsigned int)

+#define IOCTL_VNIC_FREE_UNREAD_BUF              _IOW(MBIM_MAGIC,7,unsigned int)

+#define IOCTL_VNIC_GET_USB_STATUS               _IOR(MBIM_MAGIC,8,unsigned int)

+#define IOCTL_VNIC_GET_CID_MAXTRANSFER_SIZE     _IOR(MBIM_MAGIC,9,unsigned int)

+

+#define IOCTL_VNIC_SET_CID_SESSIONID            _IOW(MBIM_MAGIC,10,unsigned int)

+#define IOCTL_VNIC_SET_CID_DSSSESSIONID         _IOW(MBIM_MAGIC,11,unsigned int)

+#define IOCTL_VNIC_SET_NET_LOOP_TEST            _IOW(MBIM_MAGIC,12,unsigned int)

+#define IOCTL_VNIC_GET_NET_STATUS               _IOR(MBIM_MAGIC,13,unsigned int)

+

+

+

+

+#define MBIM_STATUS_BYTECOUNT		64    //16

+#define LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */

+#define FORMATS_SUPPORTED_MBIM  	0x0001

+#define NCM_NDP_HDR_CRC		0x01000000

+#define NCM_NDP_HDR_NOCRC	0x00000000

+

+#define MBIM_NTB_DEFAULT_IN_SIZE	   16384

+#define MBIM_NTB_DEFAULT_OUT_SIZE		16384

+

+

+#define MBIM_CTRL_NOCOPY

+#define USB_MBIM_CTRL_MSG_NODE_CNT 32

+#define MBIM_MAX_CONFIGS	1

+#define MBIM_MAX_CTRL_MSG      0x200 /* 512; keep this <= 1024 for now (the req allocated by composite is 1024 bytes) */

+#define MBIM_MAX_POOL_NUM    32

+#define	MAX_TX_NONFIXED		(512 * 3)

+#define MBIM_INPUT_DATAGRAM_MAX_COUNT  10 

+

+//struct f_mbim	* g_mbim = NULL;

+

+#define basic_connect_command 0

+#define sms_command 1

+//char *g_sent_buf = NULL;

+

+#define USB_CDC_NCM_NDP16_LENGTH_MIN		0x10

+

+#define MBIM_IP_MTU_EXTRA      20

+#define MBIM_SMS_FALG  0x3F6CE523

+

+struct mbim_pool_ctrl_s{

+    struct list_head list;

+    u16 max_size ;

+    u16 real_size ;

+    u8  buf[0] ;

+};

+

+struct mbim_pool_data_s{

+    struct list_head list;

+    void*  pdata ;

+};

+
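+/*
+ * Per-format NTB parsing parameters: signatures and header sizes differ
+ * between 16-bit and 32-bit NTBs, and the remaining fields give offsets
+ * and lengths in 16-bit units (see INIT_NDP16_OPTS / INIT_NDP32_OPTS below).
+ */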

+struct ndp_parser_opts {

+	u32		nth_sign;

+	u32		ndp_sign;

+	u16	    nth_size;

+	u16   	ndp_size;

+	unsigned	ndplen_align;

+	/* sizes in u16 units */

+	u16 	dgram_item_len; /* index or length */

+	u16 	block_length;

+	u16  	fp_index;

+	u16     	reserved1;

+	u16     	reserved2;

+	u16     	next_fp_index;

+};

+

+

+__packed typedef struct{

+	u32	ntb_input_size;

+	u16	ntb_max_datagrams;

+	u16	reserved;

+}T_MBIM_NTB_INPUT_SIZE;

+

+typedef struct{

+	struct usb_cdc_mbim_ntb_parameters ntbParam;

+	T_MBIM_NTB_INPUT_SIZE ntbSetInputSize;

+	T_MBIM_NTB_INPUT_SIZE ntbGetInputSize;

+	u16			 	  ntbSetFormat;

+	u16			 	  ntbGetFormat;

+	u16				  ntbSetDataGramSize;

+	u16				  ntbGetDataGramSize;

+	bool				  ntbPortStatus;

+}T_MBIM_STATUS_PARRAM;

+

+typedef struct mbim_params {

+	u8			confignr;

+	u8			used;

+	u16			saved_filter;

+	enum rndis_state	state;

+	u32			medium;

+	u32			speed;

+	u32			media_state;

+

+	const u8		*host_mac;

+	u16			*filter;

+	struct net_device	*dev;

+

+	u32			vendorID;

+	const char		*vendorDescr;

+	void			(*resp_avail)(void *v);

+	void			*v;

+	struct list_head		resp_queue;

+}mbim_params;

+static mbim_params mbim_per_dev_params[MBIM_MAX_CONFIGS];

+static T_MBIM_STATUS_PARRAM s_mbimStatusParam;

+

+enum mbim_notify_state {

+	MBIM_NOTIFY_NONE,		/* don't notify */

+	MBIM_NOTIFY_CONNECT,		/* issue CONNECT next */

+	MBIM_NOTIFY_SPEED,		/* issue SPEED_CHANGE next */

+};

+

+

+struct f_mbim {

+	struct gether			port;

+

+	u8				ctrl_id, data_id;

+

+	char				ethaddr[14];

+	int				    config;

+	struct usb_ep		*notify;

+	struct usb_request	*notify_req;

+	atomic_t			notify_count;

+	u8				notify_state;

+	bool				is_open;

+	bool 				portOpenState;

+	bool 				devResetState;

+	struct ndp_parser_opts		*parser_opts;

+

+    struct usb_cdc_mbim_ntb_parameters  ntb_params ;

+    int             state;/*0 unbind;1 bind;2 open;3 close*/

+	int 			trans_flag;		/*0 can not rea/write;1  read/write ok*/			

+	/*

+	 * for notification, it is accessed from both

+	 * callback and ethernet open/close

+	 */

+	 

+	spinlock_t			lock;

+

+	/* control channel shared with the application layer */

+    atomic_t ctrl_rx_cnt ;

+    atomic_t ctrl_tx_cnt ;

+    struct spinlock  ctrl_lock ;

+

+    struct list_head ctrl_rx_list ;

+    struct list_head ctrl_tx_list ;

+    //struct list_head  ctrl_idle_list ;

+    atomic_t idle_cnt ;

+    struct list_head  idle_list ;

+

+    wait_queue_head_t ctrl_read_wq ;

+    wait_queue_head_t ctrl_write_wq ;

+    bool ctrl_open_flag ;

+    

+	/* data channel shared with the application layer */

+    atomic_t data_rx_cnt ;

+    atomic_t data_tx_cnt ;

+    spinlock_t   data_lock ;

+    struct list_head  data_rx_list ;

+    struct list_head  data_ilde_list ;

+    atomic_t data_idle_cnt ;

+

+

+	void *pool_mem;		/* base address of the CID memory pool */

+

+    void * data_pool_mem ;

+

+    wait_queue_head_t data_read_wq ;

+    wait_queue_head_t data_write_wq ;

+    struct spinlock   conn_lock ;

+    bool data_open_flag ;

+    atomic_t  netlink_path_status ;

+    atomic_t  dss_session_id ;

+    atomic_t  session_id ;

+	uint32_t rx_max;	/* largest NTB that MBIM can receive */

+	uint32_t tx_max;	/* largest NTB that MBIM can send */

+	uint16_t rx_seq;	/* NTB RX sequence number */

+	uint16_t tx_seq;	/* NTB TX sequence number */

+    uint8_t   loop_test_status ;

+

+	wait_queue_head_t lp_wait;

+	struct task_struct	*lp_thread;    

+    atomic_t lb_flag ;

+

+    

+};

+

+

+

+#define INIT_NDP16_OPTS {					\

+		.nth_sign = USB_CDC_NCM_NTH16_SIGN,		\

+		.ndp_sign = USB_CDC_MBIM_NDP16_IPS_SIGN,	\

+		.nth_size = sizeof(struct usb_cdc_ncm_nth16),	\

+		.ndp_size = sizeof(struct usb_cdc_ncm_ndp16),	\

+		.ndplen_align = 4,				\

+		.dgram_item_len = 1,				\

+		.block_length = 1,				\

+		.fp_index = 1,					\

+		.reserved1 = 0,					\

+		.reserved2 = 0,					\

+		.next_fp_index = 1,				\

+	}

+

+

+#define INIT_NDP32_OPTS {					\

+		.nth_sign = USB_CDC_NCM_NTH32_SIGN,		\

+		.ndp_sign = USB_CDC_MBIM_NDP32_IPS_SIGN,	\

+		.nth_size = sizeof(struct usb_cdc_ncm_nth32),	\

+		.ndp_size = sizeof(struct usb_cdc_ncm_ndp32),	\

+		.ndplen_align = 8,				\

+		.dgram_item_len = 2,				\

+		.block_length = 2,				\

+		.fp_index = 2,					\

+		.reserved1 = 1,					\

+		.reserved2 = 2,					\

+		.next_fp_index = 2,				\

+	}

+

+static struct ndp_parser_opts ndp16_opts = INIT_NDP16_OPTS;

+static struct ndp_parser_opts ndp32_opts = INIT_NDP32_OPTS;

+static u8 mbim_mac[6]= {0x84,0x8F,0xBD,0x5C,0x5B,0x5B};

+static int mbim_conn_pool_init(struct f_mbim * dev);

+void mbim_conn_pool_deinit(struct f_mbim * dev);

+
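+/*
+ * put_ncm()/get_ncm() write and read NTB header fields whose width depends
+ * on the parser options; "size" is given in 16-bit units (1 = u16, 2 = u32).
+ */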

+static inline void put_ncm(__le16 **p, unsigned size, unsigned val)

+{

+	switch (size) {

+	case 1:

+		put_unaligned_le16((u16)val, *p);

+		break;

+	case 2:

+		put_unaligned_le32((u32)val, *p);

+

+		break;

+	default:

+		BUG();

+	}

+

+	*p += size;

+}

+

+static inline unsigned get_ncm(__le16 **p, unsigned size)

+{

+	unsigned tmp;

+

+	switch (size) {

+	case 1:

+		tmp = get_unaligned_le16(*p);

+		break;

+	case 2:

+		tmp = get_unaligned_le32(*p);

+		break;

+	default:

+		BUG();

+	}

+

+	*p += size;

+	return tmp;

+}

+

+static inline struct f_mbim *func_to_mbim(struct usb_function *f)

+{

+	return container_of(f, struct f_mbim, port.func);

+}

+

+/* peak (theoretical) bulk transfer rate in bits-per-second */

+static inline unsigned mbim_bitrate(struct usb_gadget *g)

+{

+	if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)

+		return 13 * 512 * 8 * 1000 * 8;

+	else

+		return 19 *  64 * 1 * 1000 * 8;

+}

+
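+/*
+ * Default NTB parameters advertised to the host: 16-bit NTB format only,
+ * 4-byte NDP alignment, and up to 10 datagrams per OUT NTB.
+ */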

+static struct usb_cdc_mbim_ntb_parameters mbim_ntb_parameters = {

+	.wLength = sizeof mbim_ntb_parameters,

+	.bmNtbFormatsSupported = cpu_to_le16(FORMATS_SUPPORTED_MBIM),

+	.dwNtbInMaxSize = cpu_to_le32(USB_CDC_MBIM_NTB_MIN_IN_SIZE),

+	.wNdpInDivisor = cpu_to_le16(4),

+	.wNdpInPayloadRemainder = cpu_to_le16(0),

+	.wNdpInAlignment = cpu_to_le16(4),

+	.wPadding1 = 0,

+	.dwNtbOutMaxSize = cpu_to_le32(USB_CDC_MBIM_NTB_MIN_OUT_SIZE),

+	.wNdpOutDivisor = cpu_to_le16(4),

+	.wNdpOutPayloadRemainder = cpu_to_le16(0),

+	.wNdpOutAlignment = cpu_to_le16(4),

+	//.wNtbOutMaxDatagrams = 0,

+	.wNtbOutMaxDatagrams = 10,

+};

+

+static struct usb_interface_assoc_descriptor mbim_iad_descriptor = {

+	.bLength =		sizeof mbim_iad_descriptor,

+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,

+

+	.bFirstInterface =		0, 

+	.bInterfaceCount = 		2,	// control + data

+	.bFunctionClass =		USB_CLASS_COMM,

+	.bFunctionSubClass =	USB_CDC_SUBCLASS_MBIM,

+	.bFunctionProtocol =		USB_CDC_PROTO_NONE,

+	/* .iFunction = DYNAMIC */

+};

+

+

+/* interface descriptor: */

+ /*MBIM communication class interface*/

+static struct usb_interface_descriptor mbim_comm_intf = {

+	.bLength =		sizeof mbim_comm_intf,

+	.bDescriptorType =	USB_DT_INTERFACE,

+	.bAlternateSetting =	0x00,	/* the spec value is 1; the test code uses 0 */

+	/* .bInterfaceNumber = DYNAMIC */

+	/* status endpoint is optional; this could be patched later */

+	.bNumEndpoints =	1,

+	.bInterfaceClass =	USB_CLASS_COMM,

+	.bInterfaceSubClass =  USB_CDC_SUBCLASS_MBIM,

+	.bInterfaceProtocol =    USB_CDC_PROTO_NONE,

+	/*.iInterface = DYNAMIC */

+};

+

+/* MBIM communication header descriptor */

+static struct usb_cdc_header_desc mbim_header_desc = {

+	.bLength =		sizeof mbim_header_desc,

+	.bDescriptorType =	USB_DT_CS_INTERFACE,

+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,

+	.bcdCDC =	BCD_CDC,

+};

+

+static struct usb_cdc_union_desc mbim_union_desc = {

+	.bLength =		sizeof(mbim_union_desc),

+	.bDescriptorType =	USB_DT_CS_INTERFACE,

+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,

+};

+

+

+static struct usb_mbim_func_desc mbim_desc = {

+	.bLength =		sizeof mbim_desc,

+	.bDescriptorType =	USB_DT_CS_INTERFACE,

+	.bDescriptorSubType =	 DSC_SUBTYPE_CS_MBIM,

+	.bcdMBIMVersion=	0x0100,

+	.wMaxControlMessage=MBIM_MAX_CTRL_MSG , //0x1000, //no smaller than 512

+	.bNumberFilters=0x10, //no smaller than 16

+	.bMaxFilterSize=0x80, //not exceed 192

+	.wMaxSegmentSize= 0x0FE0, //no smaller than 2048

+	.bmNetworkCapabilities= 0x20,

+};

+

+static struct usb_cdc_mbim_extended_desc mbim_extended_desc = {

+	.bLength = 		sizeof(mbim_extended_desc),

+	.bDescriptorType =	USB_DT_CS_INTERFACE,

+	.bDescriptorSubType = DSC_SUBTYPE_CS_EXTENDED_MBIM, 

+	.bcdMBIMExtendedVersion = 	0x0100,

+	.bMaxOutstandingCommandMessages =	0x01,

+	.wMTU =	0x05DC,

+ };

+

+

+ /*MBIM data class interface*/

+static struct usb_interface_descriptor mbim_data_intf = {

+	.bLength =		sizeof mbim_data_intf,

+	.bDescriptorType =	USB_DT_INTERFACE,

+	//.bInterfaceNumber =	1,

+	.bAlternateSetting =  0,

+	/* .bInterfaceNumber = DYNAMIC */

+	/* status endpoint is optional; this could be patched later */

+	.bNumEndpoints =	0,

+	.bInterfaceClass =	USB_CLASS_CDC_DATA,

+	.bInterfaceSubClass =  0,

+	.bInterfaceProtocol =  USB_CDC_MBIM_PROTO_NTB,

+	/* .iInterface = DYNAMIC */

+};

+static struct usb_interface_descriptor mbim_data_intf1= {

+	.bLength =		sizeof mbim_data_intf1,

+	.bDescriptorType =	USB_DT_INTERFACE,

+	//.bInterfaceNumber =	1,

+	.bAlternateSetting =  0x01,

+	/* .bInterfaceNumber = DYNAMIC */

+	/* status endpoint is optional; this could be patched later */

+	.bNumEndpoints =	2,

+	.bInterfaceClass =	USB_CLASS_CDC_DATA,

+	.bInterfaceSubClass =   0,

+	.bInterfaceProtocol =  USB_CDC_MBIM_PROTO_NTB,

+	/* .iInterface = DYNAMIC */

+};

+static struct usb_interface_descriptor mbim_data_intf2= {

+	.bLength =		sizeof mbim_data_intf2,

+	.bDescriptorType =	USB_DT_INTERFACE,

+	//.bInterfaceNumber =	1,

+	.bAlternateSetting =  0x02,

+	/* .bInterfaceNumber = DYNAMIC */

+	/* status endpoint is optional; this could be patched later */

+	.bNumEndpoints =	2,

+	.bInterfaceClass =	USB_CLASS_CDC_DATA,

+	.bInterfaceSubClass =  0,

+	.bInterfaceProtocol =   USB_CDC_MBIM_PROTO_NTB,

+	/* .iInterface = DYNAMIC */

+};

+

+

+static struct usb_endpoint_descriptor mbim_notify_desc = {

+	.bLength =		USB_DT_ENDPOINT_SIZE,

+	.bDescriptorType =	USB_DT_ENDPOINT,

+

+	.bEndpointAddress =	USB_DIR_IN,

+	.bmAttributes =		USB_ENDPOINT_XFER_INT,

+	.wMaxPacketSize =	cpu_to_le16(MBIM_STATUS_BYTECOUNT),

+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,

+};

+

+static struct usb_endpoint_descriptor mbim_in_desc = {

+	.bLength =		USB_DT_ENDPOINT_SIZE,

+	.bDescriptorType =	USB_DT_ENDPOINT,

+

+	.bEndpointAddress =	USB_DIR_IN,

+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,

+	//.wMaxPacketSize =	64,

+};

+

+static struct usb_endpoint_descriptor mbim_out_desc = {

+	.bLength =		USB_DT_ENDPOINT_SIZE,

+	.bDescriptorType =	USB_DT_ENDPOINT,

+	.bEndpointAddress =	USB_DIR_OUT,

+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,

+	//.wMaxPacketSize =	64,

+};

+
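+/*
+ * Descriptor set for the MBIM function: IAD, control interface with its
+ * class-specific descriptors and interrupt (notify) endpoint, then the
+ * data interface alternate settings with their bulk endpoints.
+ */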

+static struct usb_descriptor_header *mbim_descriptor_function[]  = {

+	(struct usb_descriptor_header *) &mbim_iad_descriptor,

+	/* CDC MBIM control descriptors */

+	(struct usb_descriptor_header *) &mbim_comm_intf,

+	(struct usb_descriptor_header *) &mbim_header_desc,

+	(struct usb_descriptor_header *) &mbim_union_desc,

+	(struct usb_descriptor_header *) &mbim_desc,

+	(struct usb_descriptor_header *) &mbim_extended_desc,

+	(struct usb_descriptor_header *) &mbim_notify_desc,

+	/* data interface, altsettings 0 and 2 */

+	(struct usb_descriptor_header *) &mbim_data_intf,

+	(struct usb_descriptor_header *) &mbim_data_intf1,

+	(struct usb_descriptor_header *) &mbim_in_desc,

+	(struct usb_descriptor_header *) &mbim_out_desc,

+	NULL,

+};

+

+

+static struct usb_endpoint_descriptor mbim_data_notify = {

+	.bLength =		USB_DT_ENDPOINT_SIZE,

+	.bDescriptorType =	USB_DT_ENDPOINT,

+

+	.bEndpointAddress =	USB_DIR_IN,

+	.bmAttributes =		USB_ENDPOINT_XFER_INT,

+	.wMaxPacketSize =	cpu_to_le16(MBIM_STATUS_BYTECOUNT),

+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,

+};

+static struct usb_endpoint_descriptor mbim_data_in = {

+	.bLength =		USB_DT_ENDPOINT_SIZE,

+	.bDescriptorType =	USB_DT_ENDPOINT,

+

+	.bEndpointAddress =	USB_DIR_IN,

+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,

+	.wMaxPacketSize =	 cpu_to_le16(512),

+};

+

+static struct usb_endpoint_descriptor mbim_data_out = {

+	.bLength =		USB_DT_ENDPOINT_SIZE,

+	.bDescriptorType =	USB_DT_ENDPOINT,

+

+	.bEndpointAddress =	USB_DIR_OUT,

+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,

+	.wMaxPacketSize =	cpu_to_le16(512),

+};

+

+

+

+static struct usb_descriptor_header *mbim_data_descriptor_function[]  = {

+	(struct usb_descriptor_header *) &mbim_iad_descriptor,

+	/* CDC MBIM control descriptors */

+	(struct usb_descriptor_header *) &mbim_comm_intf,

+	(struct usb_descriptor_header *) &mbim_header_desc,

+	(struct usb_descriptor_header *) &mbim_union_desc,

+	(struct usb_descriptor_header *) &mbim_desc,

+	(struct usb_descriptor_header *) &mbim_extended_desc,

+	(struct usb_descriptor_header *) &mbim_data_notify,

+	/* data interface, altsettings 0 and 2 */

+	(struct usb_descriptor_header *) &mbim_data_intf,

+	(struct usb_descriptor_header *) &mbim_data_intf1,

+	(struct usb_descriptor_header *) &mbim_data_in,

+	(struct usb_descriptor_header *) &mbim_data_out,

+	NULL,

+};

+

+/* string descriptors: */

+

+#define STRING_CTRL_IDX	0

+#define STRING_MAC_IDX	1

+#define STRING_DATA_IDX	2

+#define STRING_IAD_IDX	3

+

+static struct usb_string mbim_string_defs[] = {

+	[STRING_CTRL_IDX].s = "CDC Network Control Model (MBIM)",

+	[STRING_MAC_IDX].s = NULL /* DYNAMIC */,

+	[STRING_DATA_IDX].s = "CDC Network Data",

+	[STRING_IAD_IDX].s = "CDC MBIM",

+	{  } /* end of list */

+};

+

+static struct usb_gadget_strings mbim_string_table = {

+	.language =		0x0409,	/* en-us */

+	.strings =		mbim_string_defs,

+};

+

+static struct usb_gadget_strings *mbim_strings[] = {

+	&mbim_string_table,

+	NULL,

+};

+

+#if 1

+int got_sms_flag = 0;

+int mbim_get_sms_flag(void)

+{

+	return got_sms_flag;

+}

+

+typedef struct __RX_DEBUG{

+	int cmd;

+	int msg_len;

+	int txn_id;

+	int real_len;

+    struct list_head *list;	

+}drv_rx_debug_info;
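+/* Small ring buffers recording recent MBIM control RX/TX messages for debugging. */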

+drv_rx_debug_info g_drv_mbim_rx_info[32] ={0};

+int g_drv_mbim_rx_cnt = 0;

+drv_rx_debug_info g_drv_mbim_tx_info[32] ={0};

+int g_drv_mbim_tx_cnt = 0;

+

+

+drv_rx_debug_info g_drv_mbim_rx_all[64] ={0};

+int g_drv_mbim_rx_all_cnt = 0;

+

+void drv_mbim_check_sms(__le32 *data, int rx_tx)

+{

+	__le32 *check_sms  = data;

+	int tmp_flag = get_unaligned_le32(check_sms + 8);

+	int tmp_cid = get_unaligned_le32(check_sms + 9);

+	

+	if(tmp_flag == MBIM_SMS_FALG){

+		printk("\ndrv_mbim_check_sms , %s, got sms,tmp_cid:%08x\n", (rx_tx == 0 ? "read" : "write"), tmp_cid);

+		got_sms_flag += 1;

+	}

+

+}

+void drv_mbim_rx_static(struct mbim_pool_ctrl_s *pctrl)

+{

+

+	u8* databuf = pctrl->buf;

+    __le32 *tmp = (__le32 *)databuf;

+	drv_mbim_check_sms(tmp, 0);

+	g_drv_mbim_rx_info[g_drv_mbim_rx_cnt].cmd = get_unaligned_le32(tmp++);

+	g_drv_mbim_rx_info[g_drv_mbim_rx_cnt].msg_len = get_unaligned_le32(tmp++);

+	g_drv_mbim_rx_info[g_drv_mbim_rx_cnt].txn_id = get_unaligned_le32(tmp++);

+	g_drv_mbim_rx_info[g_drv_mbim_rx_cnt].real_len = pctrl->real_size;

+	g_drv_mbim_rx_info[g_drv_mbim_rx_cnt].list= &pctrl->list;

+	if(++g_drv_mbim_rx_cnt >= 32)

+		g_drv_mbim_rx_cnt = 0;

+	

+}

+

+void drv_mbim_tx_static(struct mbim_pool_ctrl_s *pctrl)

+{

+	u8* databuf = pctrl->buf;

+    __le32 *tmp = (__le32 *)databuf;

+	drv_mbim_check_sms(tmp, 1);

+	g_drv_mbim_tx_info[g_drv_mbim_tx_cnt].cmd = get_unaligned_le32(tmp++);

+	g_drv_mbim_tx_info[g_drv_mbim_tx_cnt].msg_len = get_unaligned_le32(tmp++);

+	g_drv_mbim_tx_info[g_drv_mbim_tx_cnt].txn_id = get_unaligned_le32(tmp++);

+	g_drv_mbim_tx_info[g_drv_mbim_tx_cnt].real_len = pctrl->real_size;

+	g_drv_mbim_tx_info[g_drv_mbim_tx_cnt].list= &pctrl->list;

+	if(++g_drv_mbim_tx_cnt >= 32)

+		g_drv_mbim_tx_cnt = 0;

+	

+}

+

+void drv_mbim_rx_all(u8 *buff, int len)

+{

+

+	//u8* databuf = pctrl->buf;

+    __le32 *tmp = (__le32 *)buff;

+	g_drv_mbim_rx_all[g_drv_mbim_rx_all_cnt].cmd = get_unaligned_le32(tmp++);

+	g_drv_mbim_rx_all[g_drv_mbim_rx_all_cnt].msg_len = get_unaligned_le32(tmp++);

+	g_drv_mbim_rx_all[g_drv_mbim_rx_all_cnt].txn_id = get_unaligned_le32(tmp++);

+	g_drv_mbim_rx_all[g_drv_mbim_rx_all_cnt].real_len = len;

+	if(++g_drv_mbim_rx_all_cnt >= 64)

+		g_drv_mbim_rx_all_cnt = 0;

+	

+}

+

+#endif 

+

+static struct f_mbim* g_mbim = NULL ;

+extern int get_vnic_multi_packet_num(void) ;

+static inline void mbim_reset_values(struct f_mbim *mbim)

+{

+	mbim->parser_opts = &ndp16_opts;

+	mbim->port.cdc_filter = DEFAULT_FILTER;

+

+	/* doesn't make sense for ncm, fixed size used */

+	mbim->port.header_len = 0;

+	mbim->port.fixed_out_len = MBIM_NTB_DEFAULT_OUT_SIZE ;//  le32_to_cpu(mbim_ntb_parameters.dwNtbOutMaxSize);

+	mbim->port.fixed_in_len = MBIM_NTB_DEFAULT_IN_SIZE ;

+    mbim->ntb_params = mbim_ntb_parameters ;

+	/* OUT direction: maximum supported packet (datagram) count */

+    mbim->ntb_params.dwNtbInMaxSize = cpu_to_le32(MBIM_NTB_DEFAULT_IN_SIZE);

+     mbim->ntb_params.dwNtbOutMaxSize =  cpu_to_le32(MBIM_NTB_DEFAULT_OUT_SIZE);

+    mbim->ntb_params.wNtbOutMaxDatagrams =  cpu_to_le16(get_vnic_multi_packet_num());

+}

+void gether_mbim_uevent(int ecm_switch);

+

+#ifdef CONFIG_PM

+unsigned int g_mbim_suspend_cnt = 0;

+unsigned int g_mbim_resume_cnt = 0;

+

+static void mbim_suspend(struct usb_function *f)

+{

+	struct f_mbim		*mbim = func_to_mbim(f);

+	struct usb_composite_dev *cdev = f->config->cdev;

+	

+	if (!mbim->notify->driver_data)

+		return;

+    g_mbim_suspend_cnt++;

+#if 0	

+

+	usb_printk("%s, %u, suspendcnt:%d\n", __func__, __LINE__, g_mbim_suspend_cnt);

+	USBSTACK_DBG("%s, %u suspendcnt:%d", __func__, __LINE__, g_mbim_suspend_cnt);

+//	gether_disconnect(&mbim->port);

+    mbim->port.suspend_state = 1;

+    multi_packet_deactivate();

+    usb_ep_disable((&mbim->port)->in_ep);

+//  usb_ep_disable((&mbim->port)->out_ep);

+#endif

+	mbim->port.suspend_state = 1;

+    printk("\n mbim_susp_cnt:%u, notify_cnt:%d, ctrl_txq:%d, \n\n", 

+		g_mbim_suspend_cnt,atomic_read(&mbim->notify_count), atomic_read(&mbim->ctrl_tx_cnt));

+    USBSTACK_DBG("mbim_susp_cnt:%u, notify_cnt:%d, ctrl_txq:%d\n", 

+		g_mbim_suspend_cnt,atomic_read(&mbim->notify_count), atomic_read(&mbim->ctrl_tx_cnt));

+    //gether_mbim_uevent(0);

+}

+static void mbim_resume(struct usb_function *f)

+{

+	struct f_mbim		*mbim = func_to_mbim(f);

+	struct usb_composite_dev *cdev = f->config->cdev;

+

+	if (!mbim->notify->driver_data){

+		printk("\n\n#######%s, %u ,notify->driver_data is null\n\n", __func__, __LINE__);

+		USBSTACK_DBG("#####%s, %u ,notify->driver_data is null\n", __func__, __LINE__);

+		return;

+	}	

+    g_mbim_resume_cnt++;

+#if 0

+	usb_printk("%s, %u, resumecnt:%d\n", __func__, __LINE__, g_mbim_resume_cnt);

+	USBSTACK_DBG("%s, %u resumecnt:%d", __func__, __LINE__, g_mbim_resume_cnt);

+//	gether_connect(&mbim->port);

+    usb_ep_resume_enable(mbim->notify);

+    usb_ep_resume_enable((&mbim->port)->in_ep);

+//  usb_ep_enable((&mbim->port)->out_ep);

+#endif

+	mbim->port.suspend_state = 0;

+    printk("\n mbim_resume_cnt:%u\n\n", g_mbim_resume_cnt);

+    USBSTACK_DBG("mbim_resume_cnt:%u\n", g_mbim_resume_cnt);

+	//if(atomic_read(&mbim->lb_flag) == 0)

+	//    gether_mbim_uevent(1);

+    //multi_packet_activate();

+

+}

+

+#endif

+

+

+
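+/*
+ * Send the next pending notification (SPEED_CHANGE, then NETWORK_CONNECTION)
+ * on the interrupt endpoint.  Called with mbim->lock held; the lock is
+ * dropped around usb_ep_queue() because the completion may run immediately.
+ */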

+static void mbim_do_notify(struct f_mbim *mbim)

+{

+	struct usb_request		*req = mbim->notify_req;

+	struct usb_cdc_notification	*event;

+	struct usb_composite_dev	*cdev = mbim->port.func.config->cdev;

+	__le32				*data;

+	int				status;

+	printk("mbim_do_notify\n");

+

+	/* notification already in flight? */

+	if (!req)

+		return;

+

+	event = req->buf;

+	switch (mbim->notify_state) {

+	case MBIM_NOTIFY_NONE:

+		return;

+

+	case MBIM_NOTIFY_CONNECT:

+		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;

+		if (mbim->is_open)

+			event->wValue = cpu_to_le16(1);

+		else

+			event->wValue = cpu_to_le16(0);

+		event->wLength = 0;

+		req->length = sizeof *event;

+

+		//USBSTACK_DBG (cdev, "notify connect %s\n",mbim->is_open ? "true" : "false");

+		mbim->notify_state = MBIM_NOTIFY_NONE;

+		break;

+

+	case MBIM_NOTIFY_SPEED:

+		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;

+		event->wValue = cpu_to_le16(0);

+		event->wLength = cpu_to_le16(8);

+		req->length = MBIM_STATUS_BYTECOUNT;

+

+		/* SPEED_CHANGE data is up/down speeds in bits/sec */

+		data = req->buf + sizeof *event;

+		data[0] = cpu_to_le32(mbim_bitrate(cdev->gadget));

+		data[1] = data[0];

+

+		//USBSTACK_DBG ( cdev, "notify speed %d\n", mbim_bitrate(cdev->gadget));

+		mbim->notify_state = MBIM_NOTIFY_CONNECT;

+		break;

+	}

+	event->bmRequestType = 0xA1;

+	event->wIndex = cpu_to_le16(mbim->ctrl_id);

+    printk("\n\n[file]: %s ,[][func]: %s ,[line] = %d \n\n",__FILE__,__func__,__LINE__);

+

+	mbim->notify_req = NULL;

+	/*

+	 * In double buffering if there is a space in FIFO,

+	 * completion callback can be called right after the call,

+	 * so unlocking

+	 */

+	spin_unlock(&mbim->lock);

+	status = usb_ep_queue(mbim->notify, req, GFP_ATOMIC);

+	spin_lock(&mbim->lock);

+	if (status < 0) {

+		mbim->notify_req = req;

+		//USBSTACK_DBG ( cdev, "notify --> %d\n", status);

+	}

+}

+

+static void mbim_notify(struct f_mbim *mbim)

+{

+	/*

+	 * NOTE on most versions of Linux, host side cdc-ethernet

+	 * won't listen for notifications until its netdevice opens.

+	 * The first notification then sits in the FIFO for a long

+	 * time, and the second one is queued.

+	 *

+	 * If mbim_notify() is called before the second (CONNECT)

+	 * notification is sent, then it will reset to send the SPEED

+	 * notification again (and again, and again), but it's not a problem

+	 */

+	mbim->notify_state = MBIM_NOTIFY_SPEED;

+	mbim_do_notify(mbim);

+}

+

+static void mbim_notify_complete(struct usb_ep *ep, struct usb_request *req)

+{

+	struct f_mbim			*mbim = req->context;

+	struct usb_composite_dev	*cdev = mbim->port.func.config->cdev;

+	//struct usb_cdc_notification	*event = req->buf;

+

+	spin_lock(&mbim->lock);

+	switch (req->status) {

+	case 0:

+		//USBSTACK_DBG(cdev, "Notification %02x sent\n",

+		 //    event->bNotificationType);

+		break;

+	case -ECONNRESET:

+	case -ESHUTDOWN:

+		mbim->notify_state = MBIM_NOTIFY_NONE;

+		break;

+	default:

+		//USBSTACK_DBG ( cdev, "event %02x --> %d\n",

+		//	event->bNotificationType, req->status);

+		break;

+	}

+	//mbim->notify_req = req;

+	//mbim_do_notify(mbim);

+	spin_unlock(&mbim->lock);

+}

+

+

+
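+/*
+ * ep0 OUT data-stage completion for SET_NTB_INPUT_SIZE: validate the
+ * 32-bit size supplied by the host and store it as the fixed IN NTB
+ * length; any malformed value stalls the endpoint.
+ */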

+static void mbim_ep0out_complete(struct usb_ep *ep, struct usb_request *req)

+{

+	/* now for SET_NTB_INPUT_SIZE only */

+	unsigned		in_size;

+	struct usb_function	*f = req->context;

+	struct f_mbim		*mbim = func_to_mbim(f);

+	struct usb_composite_dev *cdev = ep->driver_data;

+

+	req->context = NULL;

+	if (req->status || req->actual != req->length) {

+		//USBSTACK_DBG(cdev, "Bad control-OUT transfer\n");

+		goto invalid;

+	}

+

+	in_size = get_unaligned_le32(req->buf);

+	if (in_size < USB_CDC_MBIM_NTB_MIN_IN_SIZE ||

+	    in_size > le32_to_cpu(mbim_ntb_parameters.dwNtbInMaxSize)) {

+		//USBSTACK_DBG(cdev, "Got wrong INPUT SIZE (%d) from host\n", in_size);

+		goto invalid;

+	}

+

+	mbim->port.fixed_in_len = in_size;

+	//USBSTACK_DBG(cdev, "Set NTB INPUT SIZE %d\n", in_size);

+	return;

+

+invalid:

+	usb_ep_set_halt(ep);

+	return;

+}

+

+

+int mbim_set_param_dev(u8 configNr, struct net_device *dev, u16 *cdc_filter)

+{	

+	printk("mbim_set_param_dev\n");

+	if (!dev)

+		return -EINVAL;

+	//if (configNr >= MBIM_MAX_CONFIGS) return -1;

+	mbim_per_dev_params[configNr].dev = dev;

+	mbim_per_dev_params[configNr].filter = cdc_filter;

+

+	return 0;

+}

+
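+/*
+ * "Patch" an encapsulated command whose MessageType was not recognized:
+ * build a copy whose first dword is MBIM_COMMAND_MSG followed by the
+ * original buffer shifted right by four bytes (its last dword is
+ * dropped), then copy that back over the original so it can still be
+ * delivered to the control application.
+ */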

+int mbim_msg_patch(u8 *buf, int len)

+{

+	u8* temp_buf = NULL;

+	__le32 *temp;

+	temp_buf = (u8*)kmalloc(len, GFP_KERNEL);

+	if(!temp_buf){

+		printk("rndis_msg_patch malloc fail\n");

+		return -1;

+	}

+	memset(temp_buf, 0 , len);

+	temp = (__le32 *)temp_buf;

+	*temp = cpu_to_le32(MBIM_COMMAND_MSG);

+	memcpy(temp_buf+4, buf, len -4);

+	memcpy(buf, temp_buf, len);

+	kfree(temp_buf);

+	temp_buf = NULL;

+	return 0;

+}

+

+
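+/*
+ * Every MBIM control message starts with three little-endian 32-bit
+ * fields -- MessageType, MessageLength, TransactionId -- and that is
+ * all this filter inspects.  Purely illustrative layout (the code below
+ * reads the fields with get_unaligned_le32() instead of a struct):
+ *
+ *	struct mbim_msg_header {	// hypothetical, for reference
+ *		__le32 MessageType;
+ *		__le32 MessageLength;
+ *		__le32 TransactionId;
+ *	};
+ *
+ * Known types return 0; an unknown type returns -1 so the caller can
+ * patch or reject the message.
+ */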

+int mbim_cid_msg_filter(u8 *buf ,unsigned int len, int filter_type)

+{

+    __le32 *tmp;

+    u32 MsgType, MsgLength, tx_id ;

+    int ret = 0 ;

+    if (!buf || len <= 0)

+    {

+         printk("[func]:%s,[line]:%d , param invalid\n",__func__,__LINE__) ;

+         return -EINVAL;

+    }

+    tmp = (__le32 *)buf;

+    MsgType   = get_unaligned_le32(tmp++);

+    MsgLength = get_unaligned_le32(tmp++);

+	tx_id = get_unaligned_le32(tmp);

+	if(filter_type == 1){

+		//printk("[func]:%s MsgType:%08x, MsgLength:%d, tx_id:%x, act_len%d\n",__func__, MsgType, MsgLength, tx_id, len) ;

+		USBSTACK_DBG("[func]:%s MsgType:%08x, MsgLength:%d, tx_id:%x, act_len%d\n",__func__, MsgType, MsgLength, tx_id, len) ;

+	}

+	

+    switch (MsgType) 

+   {

+    case MBIM_OPEN_MSG:

+    {

+         //printk("[func]:%s,[line]:%d , MBIM_OPEN_MSG \n",__func__,__LINE__) ;

+         ret = 0 ;

+         break;

+    }

+    case MBIM_CLOSE_MSG:

+    {

+         //printk("[func]:%s,[line]:%d , MBIM_CLOSE_MSG\n",__func__,__LINE__) ;

+         ret = 0 ;

+         break;

+    }

+    case MBIM_COMMAND_MSG:

+    {

+         //printk("[func]:%s,[line]:%d , MBIM_COMMAND_MSG ,len = %d \n",__func__,__LINE__,len) ;

+         ret = 0 ;

+         break;

+    }

+    case MBIM_OPEN_DONE:

+    {

+         //printk("[func]:%s,[line]:%d , MBIM_OPEN_DONE\n",__func__,__LINE__) ;

+         ret = 0 ;

+         break;

+    }

+    case MBIM_CLOSE_DONE:

+    {

+         //printk("[func]:%s,[line]:%d , MBIM_CLOSE_DONE\n",__func__,__LINE__) ;

+         ret = 0 ;

+         break;

+    }

+    case MBIM_COMMAND_DONE:

+    {

+         //printk("[func]:%s,[line]:%d , MBIM_COMMAND_DONE\n",__func__,__LINE__) ;

+         ret = 0 ;

+         break;

+    }

+    case MBIM_INDICATE_STATUS_MSG:

+    {

+         //printk("[func]:%s,[line]:%d ,MBIM_INDICATE_STATUS_MSG\n",__func__,__LINE__) ;

+         ret = 0 ;

+         break;

+    }

+

+    case MBIM_HOST_ERROR_MSG:

+    {

+         printk("[func]:%s,[line]:%d , MBIM_HOST_ERROR_MSG\n",__func__,__LINE__) ;

+         ret = 0 ;

+         break;

+    }

+    case MBIM_FUNCTION_ERROR_MSG:

+    {

+         printk("[func]:%s,[line]:%d , MBIM_FUNCTION_ERROR_MSG\n",__func__,__LINE__) ;

+         ret = 0 ;

+         break;

+    }

+    default:

+    {

+         printk("[func]:%s,[line]:%d , UNKNOWN CID :%d\n",__func__,__LINE__ ,MsgType) ;

+         ret = -1 ;

+         break;

+    }

+

+    }

+    return ret;

+

+

+}

+

+int mbim_register(void (*resp_avail)(void *v), void *v)

+{

+	u8 i;

+

+	if (!resp_avail)

+		return -EINVAL;

+

+	for (i = 0; i < MBIM_MAX_CONFIGS; i++) {

+		if (!mbim_per_dev_params[i].used) {

+			mbim_per_dev_params[i].used = 1;

+			mbim_per_dev_params[i].resp_avail = resp_avail;

+			mbim_per_dev_params[i].v = v;

+			printk("-----%s: configNr = %d\n", __func__, i);

+			return i;

+		}

+	}

+	pr_debug("failed\n");

+

+	return -ENODEV;

+}

+

+void mbim_deregister(int configNr)

+{

+	pr_debug("%s:\n", __func__);

+

+	if (configNr >= MBIM_MAX_CONFIGS) return;

+	mbim_per_dev_params[configNr].used = 0;

+}

+int resp_cnt = 0;

+int resp_cmplete_cnt = 0;
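+/*
+ * resp_avail callback registered via mbim_register(): when the control
+ * application has queued a response, raise RESPONSE_AVAILABLE on the
+ * interrupt endpoint so the host fetches it with
+ * GET_ENCAPSULATED_RESPONSE.  notify_count keeps at most one
+ * notification in flight.
+ */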

+int mbim_response_available(void *_mbim)

+{

+

+	int				status;

+	struct f_mbim			*mbim = (struct f_mbim	*)_mbim;

+	struct usb_request		*req = mbim->notify_req;

+	struct usb_composite_dev	*cdev = mbim->port.func.config->cdev;

+    if(req ==NULL)

+    {

+        printk("\n\nreq is NULL \n\n\n") ;

+        return -1;

+    }

+	if(mbim->trans_flag == 0){        

+		printk("\n\n trans_flag is NULL \n\n\n") ;

+        return -1;

+		

+	}

+	__le32	*data = req->buf;

+

+	if (atomic_inc_return(&mbim->notify_count) != 1){

+		printk("---mbim_response_available,notify_count error, notify_count:%d, excet reset usb\n", atomic_read(&mbim->notify_count)) ;

+		USBSTACK_DBG("%s notify_count error, notify_count:%d, excet reset usb",__func__, atomic_read(&mbim->notify_count));

+		//usb_notify_up(USB_DEVICE_EXCEPT_RESET, NULL);

+		return 0;

+	}

+	/* Send an MBIM RESPONSE_AVAILABLE notification; a

+	 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too

+	 *

+	 * RESPONSE_AVAILABLE is the only notification MBIM sends on the interrupt endpoint.

+	 */

+	data[0] = cpu_to_le32(0x000001A1);

+	data[1] = cpu_to_le32(0);

+	//printk("--response_available-ep_queue\n");

+	resp_cnt += 1;

+	status = usb_ep_queue(mbim->notify, req, GFP_ATOMIC);

+	if (status) {

+		printk("--response_available-ep queue fail, status:%d\n", status);

+		USBSTACK_DBG("--response_available-ep queue fail, status:%d\n", status);

+		atomic_dec(&mbim->notify_count);

+		//DBG(cdev, "notify/0 --> %d\n", status);

+		return -1;

+	}

+

+	return 0;

+}

+

+static void mbim_response_complete(struct usb_ep *ep, struct usb_request *req)

+{

+	struct f_mbim			*mbim = req->context;

+	struct usb_composite_dev	*cdev = mbim->port.func.config->cdev;

+	int				status = req->status;

+

+	/* after TX:

+	 *  - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)

+	 *  -MBIM_RESPONSE_AVAILABLE (status/irq)

+	 */

+	 char * g_sent_buf = (char *)req->buf;

+	//printk("-----response_complete, status:%d\n", req->status);

+	USBSTACK_DBG("%s ret: %d, req:0x%p, buf:0x%p",__func__, status, req, g_sent_buf);

+//    USBSTACK_DBG("%s, %u", __func__, __LINE__);

+//    usb_dbg_ep0reg();

+	switch (status) {

+	case -ECONNRESET:

+	case -ESHUTDOWN:

+		/* connection gone */

+		atomic_set(&mbim->notify_count, 0);

+		break;

+	default:

+		printk( "mbim %s response error %d, %d/%d\n",

+			ep->name, status,

+			req->actual, req->length);

+		/* FALLTHROUGH */

+	case 0:

+		if (ep != mbim->notify){

+			//printk("-----ctrl response_complete\n");

+			break;

+		}

+		resp_cmplete_cnt += 1;

+		/* handle multiple pending MBIM_RESPONSE_AVAILABLE

+		 * notifications by resending until we're done

+		 */

+		if(atomic_read(&mbim->notify_count) == 0){

+			break;

+		}

+		if (atomic_dec_and_test(&mbim->notify_count))

+			break;

+		resp_cnt += 1;

+		status = usb_ep_queue(mbim->notify, req, GFP_ATOMIC);

+		if (status) {

+			//atomic_dec(&mbim->notify_count);

+			printk( "notify usb_ep_queue fail --> %d\n", status);

+		}

+		break;

+	}

+}

+
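+/*
+ * Deliver a host->device encapsulated command to the control
+ * application: grab a buffer from idle_list, copy the command into it,
+ * move it to ctrl_rx_list and wake any reader blocked in
+ * mbim_ctrl_read().
+ */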

+static int mbim_ctrl_send_command_handle(struct f_mbim * mbim_dev ,char *buf,int len)

+{

+    if(mbim_dev == NULL || buf ==NULL || len <0)

+    {

+        printk("[func]:%s,[line]:%d ,param is invaild \n",__func__,__LINE__) ;

+        return -EINVAL ;

+

+    }

+    unsigned long flags;

+    //printk("\n\n########[func]:%s,[line]:%d ,start send to app CID msg \n\n\n",__func__,__LINE__) ;

+    spin_lock_irqsave(&mbim_dev->ctrl_lock,flags) ;

+    if(list_empty(&mbim_dev->idle_list))

+    {

+        spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags) ;

+        printk("\n\n##############[func]:%s,[line]:%d ,idle is empty,idle num =%d,rx num = %d \n\n\n",__func__,__LINE__ ,

+            atomic_read(&mbim_dev->idle_cnt), atomic_read(&mbim_dev->ctrl_rx_cnt) ) ;

+        return -1 ;

+    }

+    struct mbim_pool_ctrl_s *ctrp = list_first_entry(&mbim_dev->idle_list,  struct mbim_pool_ctrl_s, list) ;

+

+    list_del_init(&ctrp->list) ;

+#if 0

+    if(ctrp == NULL)

+    {

+        spin_unlock(&mbim_dev->ctrl_lock) ;

+        printk("[func]:%s,[line]:%d ,pointer ctrp is NULL ,idle num =%d \n",__func__,__LINE__ ,

+            atomic_read(&mbim_dev->idle_cnt) );

+           printk("[func]:%s,[line]:%d  p = 0x%p ,next = 0x%p\n", 

+            container_of(mbim_dev->idle_list.next, struct mbim_pool_ctrl_s, list),

+            mbim_dev->idle_list.next) ;

+        return -1 ;

+

+    }

+#endif

+    atomic_dec(&mbim_dev->idle_cnt);

+    spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags) ;

+    if(len > ctrp->max_size)

+    {

+        printk("[func]:%s,[line]:%d ,cid payload real num = %d ,buffer max num = %d \n",__func__,__LINE__,len ,ctrp->max_size) ;

+    }

+    ctrp->real_size = ((len > ctrp->max_size) ? ctrp->max_size: len );

+    memcpy(ctrp->buf, buf, ctrp->real_size) ;

+    //insert into the queue

+    spin_lock_irqsave(&mbim_dev->ctrl_lock,flags) ;

+    list_add_tail(&ctrp->list, &mbim_dev->ctrl_rx_list) ;

+    atomic_inc(&mbim_dev->ctrl_rx_cnt) ;

+    spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags) ;

+    //wake up the reader

+    wake_up(&mbim_dev->ctrl_read_wq);

+    return 0 ;

+

+}

+
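+/*
+ * Fill the ep0 request for GET_ENCAPSULATED_RESPONSE from the oldest
+ * entry on ctrl_tx_list, recycle that buffer back to idle_list and wake
+ * any writer blocked in mbim_ctrl_write().  Returns the payload size or
+ * a negative error.
+ */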

+static int mbim_ctrl_get_command_handle(struct f_mbim* mbim_dev ,struct usb_request * req )

+{

+    if(mbim_dev == NULL  || req == NULL)

+    {

+        printk("[func]:%s,[line]:%d ,param is invaild \n",__func__,__LINE__) ;

+        return -EINVAL ;

+    }

+    unsigned long flags ;

+    spin_lock_irqsave(&mbim_dev->ctrl_lock,flags) ;

+    if(list_empty(&mbim_dev->ctrl_tx_list))

+    {

+        spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags) ;

+        printk("[func]:%s,[line]:%d ,tx list is empty, resp_cnt:%d, resp_cmplete_cnt:%d\n",__func__,__LINE__, resp_cnt, resp_cmplete_cnt) ;

+		//panic("tx list is empty\n");

+        return -1 ;

+    }

+    struct mbim_pool_ctrl_s *ctrp = list_first_entry(&mbim_dev->ctrl_tx_list,  struct mbim_pool_ctrl_s, list) ;

+    list_del_init(&ctrp->list) ;

+    atomic_dec(&mbim_dev->ctrl_tx_cnt);

+    spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags) ;

+

+    memcpy(req->buf, ctrp->buf ,ctrp->real_size) ;

+    //insert back into the idle queue

+    

+    spin_lock_irqsave(&mbim_dev->ctrl_lock,flags) ;

+    list_add_tail(&ctrp->list, &mbim_dev->idle_list) ;

+    atomic_inc(&mbim_dev->idle_cnt) ;

+    spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags) ;

+    wake_up(&mbim_dev->ctrl_write_wq);

+    return  ctrp->real_size ;

+

+}

+

+static void mbim_command_complete(struct usb_ep *ep, struct usb_request *req)

+{

+	struct f_mbim			*mbim = req->context;

+	struct usb_composite_dev	*cdev = mbim->port.func.config->cdev;

+	int				status, ret;

+

+	drv_mbim_rx_all(req->buf, req->actual);

+	if(req->status == -ECONNRESET){

+		USBSTACK_DBG("mbim_command_complete, status error\n");

+		return;

+	}

+	

+	//printk("$$$$mbim_command_complete, config:%d\n", mbim->config);

+	//status = mbim_msg_parser(mbim->config, (u8 *) req->buf);

+	status = mbim_cid_msg_filter(req->buf,req->actual, 1) ;

+    if(status)

+    {

+        printk("[func]:%s,[line]:%d , cid msg is intercepted \n",__func__,__LINE__) ;

+		//panic("mbim_command_complete unknown cid\n");

+		//here we patch for this cmd

+		if(status == -1){			

+			ret = mbim_msg_patch(req->buf, req->actual);

+			if(ret){

+				printk("mbim_command_complete, fail for mbim_msg_patch, replugin\n");

+				USBSTACK_DBG("mbim_command_complete, fail for mbim_msg_patch, replugin\n");

+				usb_notify_up(USB_DEVICE_EXCEPT_RESET, NULL);

+		        return ;

+			}

+			USBSTACK_DBG("mbim_command_complete, mbim_msg_patch ok continue\n");

+	        printk("[func]:%s,[line]:%d , mbim_command_complete, mbim_msg_patch ok continue\n",__func__,__LINE__) ;			

+		}

+    }

+	status  = mbim_ctrl_send_command_handle(mbim, req->buf,req->actual) ;

+	USBSTACK_DBG("%s %u, ret: %d, len:%d",__func__, __LINE__, status, req->actual);

+

+	if (status < 0){

+		//ERROR(cdev, "mbim command error %d, %d/%d\n",

+		//	status, req->actual, req->length);

+		USBSTACK_DBG("MBIM command error %d, actual:%d,length:%d\n",status, req->actual, req->length);	

+	}

+}

+

+int got_ntb_param_flag = 0;

+

+void mbim_clean_ntb_param_flag(void)

+{

+	got_ntb_param_flag = 0;

+}

+
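+/*
+ * Class-specific ep0 handler.  Besides SEND/GET_ENCAPSULATED_COMMAND it
+ * services GET_NTB_PARAMETERS, GET/SET_NTB_INPUT_SIZE, RESET_FUNCTION
+ * and SET_ETHERNET_PACKET_FILTER; anything else is rejected (stall) or
+ * left to the composite core.
+ */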

+static int mbim_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)

+{

+	struct f_mbim		*mbim = func_to_mbim(f);

+	struct usb_composite_dev *cdev = f->config->cdev;

+	struct usb_request	*req = cdev->req;

+	int			value = -EOPNOTSUPP;

+	u16			w_index = le16_to_cpu(ctrl->wIndex);

+	u16			w_value = le16_to_cpu(ctrl->wValue);

+	u16			w_length = le16_to_cpu(ctrl->wLength);

+	//printk("---mbim_setup, bRequestType:0x%x, bRequest:0x%x, w_index:%02x, w_value:%04x, w_length:%02x\n", 

+	//	ctrl->bRequestType,  ctrl->bRequest,w_index,w_value, w_length);

+

+	/*

+	 * composite driver infrastructure handles everything except

+	 * CDC class messages; interface activation uses set_alt().

+	 */	

+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)

+			| USB_CDC_SET_ETHERNET_PACKET_FILTER:

+		/*

+		 * see 6.2.30: no data, wIndex = interface,

+		 * wValue = packet filter bitmap

+		 */

+		if (w_length != 0 || w_index != mbim->ctrl_id)

+			goto invalid;

+		//USBSTACK_DBG(cdev, "packet filter %02x\n", w_value);

+		/*

+		 * REVISIT locking of cdc_filter.  This assumes the UDC

+		 * driver won't have a concurrent packet TX irq running on

+		 * another CPU; or that if it does, this write is atomic...

+		 */

+		//printk("\n@@@@@@@@@@@@@@[func]:%s,[line]:%d,w_value = %d@@@@@@@@@@@@@@ \n\n",w_value);

+		mbim->port.cdc_filter = w_value;

+		value = 0;

+		break;

+	/*

+	 * and optionally:

+	 * case USB_CDC_SEND_ENCAPSULATED_COMMAND:

+	 * case USB_CDC_GET_ENCAPSULATED_RESPONSE:

+	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:

+	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:

+	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:

+	 * case USB_CDC_GET_ETHERNET_STATISTIC:

+	 */

+

+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)

+		| USB_CDC_GET_NTB_PARAMETERS:

+

+		if (w_length == 0 || w_value != 0 || w_index != mbim->ctrl_id)

+			goto invalid;

+		value = w_length > sizeof (g_mbim->ntb_params) ?

+			sizeof (g_mbim->ntb_params) : w_length;

+		memcpy(req->buf, &(g_mbim->ntb_params), value);

+		printk("mbim_setup, GET_NTB_PARAMETERS, mbim_uevent_1\r\n");

+        gether_mbim_uevent(1);

+		got_ntb_param_flag = 1;

+		//USBSTACK_DBG(cdev, "Host asked NTB parameters\n");

+		break;

+		

+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)

+			| USB_CDC_RESET_FUNCTION:

+#if 1

+		mbim->devResetState = 1;

+		mbim->portOpenState = 0;

+		s_mbimStatusParam.ntbPortStatus = 0;

+		value = 0;

+		if(got_ntb_param_flag == 0){

+			printk("mbim_setup, RESET_FUNCTION, mbim_uevent_1\r\n");			

+			gether_mbim_uevent(1);

+		}

+		printk("mbim_setup, RESET_FUNCTION, mbim_uevent_2\r\n");

+        gether_mbim_uevent(2) ;

+#endif

+		break;

+

+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)

+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:

+#if 1

+		if(w_length > cdev->bufsiz ||w_value || w_index != mbim->ctrl_id)

+			goto invalid;		

+		/* read the request; process it later */

+		value = w_length;

+		req->complete = mbim_command_complete;

+		req->context = mbim;

+#endif		

+		break;

+

+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)

+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:

+	//printk("---,mbim_setup, GET_ENCAPSULATED_RESPONSE\n" );

+#if 1

+		if (w_value || w_index != mbim->ctrl_id)

+			goto invalid;

+		else {

+            

+			u8 *buf;

+			int n;

+			u32 MsgType;

+			__le32 *tmp;

+

+			/* return the result */

+            n= mbim_ctrl_get_command_handle(mbim,  req) ;

+            if(n>=0)

+            {

+                req->complete = mbim_response_complete;

+                req->context = mbim;

+                value = n ;

+            }

+            else

+            {

+                printk("mbim_setup, resp buf is NULL,error\n");

+

+            }

+

+			/* else stalls ... spec says to avoid that */

+		}

+#endif

+		break;

+

+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)

+		| USB_CDC_GET_NTB_INPUT_SIZE:

+

+		if (w_length < 4 || w_value != 0 || w_index != mbim->ctrl_id)

+			goto invalid;

+		put_unaligned_le32(mbim->port.fixed_in_len, req->buf);

+		value = 4;

+		//USBSTACK_DBG(cdev, "Host asked INPUT SIZE, sending %d\n",

+		//     mbim->port.fixed_in_len);

+		break;

+

+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)

+		| USB_CDC_SET_NTB_INPUT_SIZE:

+	{

+		if (w_length != 4 || w_value != 0 || w_index != mbim->ctrl_id)

+			goto invalid;

+		req->complete = mbim_ep0out_complete;

+		req->length = w_length;

+		req->context = f;

+

+		value = req->length;

+		break;

+	}

+

+	

+	//case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)| USB_CDC_GET_CRC_MODE:

+	//case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)| USB_CDC_SET_CRC_MODE:

+

+	/* and disabled in ncm descriptor: */

+	/* case USB_CDC_GET_NET_ADDRESS: */

+	/* case USB_CDC_SET_NET_ADDRESS: */

+	/* case USB_CDC_GET_MAX_DATAGRAM_SIZE: */

+	/* case USB_CDC_SET_MAX_DATAGRAM_SIZE: */

+

+	default:

+invalid:

+		break;

+		//USBSTACK_DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",

+		//	ctrl->bRequestType, ctrl->bRequest,

+		//	w_value, w_index, w_length);

+	}

+

+	/* respond with data transfer or status phase? */

+	if (value >= 0) {

+		//USBSTACK_DBG(cdev, "ncm req%02x.%02x v%04x i%04x l%d\n",

+		//	ctrl->bRequestType, ctrl->bRequest,

+		//	w_value, w_index, w_length);

+

+		req->zero = (value % 64)? 0:1;

+		req->length = value;

+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);

+		if (value < 0)

+			USB_ASSERT(cdev, "ncm req %02x.%02x response err %d\n",ctrl->bRequestType, ctrl->bRequest,value);

+	}

+

+	/* device either stalls (value < 0) or reports success */

+	return value;

+}

+
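+/*
+ * set_alt: altsetting 0 on the control interface (re)enables the
+ * interrupt endpoint.  On the data interface any transition first tears
+ * down an active link; altsetting 1 then configures the bulk endpoints
+ * for the current speed and brings the link up via gether_connect().
+ */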

+static int mbim_set_alt(struct usb_function *f, unsigned intf, unsigned alt)

+{

+

+	struct f_mbim		*mbim = func_to_mbim(f);

+	struct usb_composite_dev *cdev = f->config->cdev;

+    unsigned long flags = 0 ;

+    printk("########%s, %u ,mbim_set_alt ,alt = %d ,intf = %d \n\n",__func__, __LINE__ ,alt,intf );

+

+	/* Control interface has only altsetting 0 */

+	if (intf == mbim->ctrl_id ) {

+

+		printk("$$$$ mbim->ctrl_id ==%d,%d\n",mbim->ctrl_id,mbim->ctrl_id);

+		if (alt != 0)

+		{

+            goto fail;

+        }

+	

+        if(mbim->notify->driver_data == NULL)

+        {

+            if (mbim->notify->driver_data) {

+                printk("$$$$mbim->notify->driver_data\n");

+                //USBSTACK_DBG(cdev, "reset mbim control %d\n", intf);

+                usb_ep_disable(mbim->notify);

+            }

+            

+            if (!(mbim->notify->desc)) {

+                printk("$$$$mbim->notify->desc\n");

+                //USBSTACK_DBG(cdev, "init mbim ctrl %d\n", intf);

+                if (config_ep_by_speed(cdev->gadget, f, mbim->notify))

+                    goto fail;

+            }

+            usb_ep_enable(mbim->notify);

+            mbim->notify->driver_data = mbim;

+			mbim->trans_flag = 1;

+        }

+

+

+

+	/* Data interface has two altsettings, 0 and 1 */

+	} 

+    else if (intf == mbim->data_id) {

+        struct net_device	*net;

+        if (alt > 1)

+        {

+			goto fail;

+        }

+

+        if(alt == 0)

+        {

+           

+            mbim->tx_seq = 0 ;

+            mbim->rx_seq = 0 ;

+ #if 0

+            struct list_head    *entry, *temp;

+            mbim->port.suspend_state = 1;

+            atomic_set(&mbim->notify_count, 0) ;

+        

+            spin_lock_irqsave(&mbim->ctrl_lock,flags);

+            list_for_each_safe(entry, temp, &mbim->ctrl_rx_list) 

+            {

+                struct mbim_pool_ctrl_s *pctrl ;

+                pctrl = list_entry (entry, struct mbim_pool_ctrl_s, list);

+                list_del_init (&pctrl->list) ;

+                pctrl->real_size = 0 ;

+                list_add_tail(&pctrl->list,&mbim->idle_list);

+                atomic_inc(&mbim->idle_cnt) ;

+            }

+            atomic_set(&mbim->ctrl_rx_cnt,0) ;

+            list_for_each_safe(entry, temp, &mbim->ctrl_tx_list) 

+            {

+                struct mbim_pool_ctrl_s *pctrl ;

+                pctrl = list_entry (entry, struct mbim_pool_ctrl_s, list);

+                list_del_init (&pctrl->list) ;

+                pctrl->real_size = 0 ;

+                list_add_tail(&pctrl->list,&mbim->idle_list);

+                atomic_inc(&mbim->idle_cnt) ;

+            }

+            atomic_set(&mbim->ctrl_tx_cnt,0) ;

+            spin_unlock_irqrestore(&mbim->ctrl_lock,flags);

+

+            wake_up(&mbim->ctrl_read_wq) ;

+            wake_up(&mbim->ctrl_write_wq) ;

+#endif

+        }

+        if (mbim->port.in_ep->driver_data) 

+        {

+        	printk("in_ep->driver_data, call gether_disconnect\n");

+            gether_disconnect(&mbim->port);

+            //mbim_reset_values(mbim);

+        }

+        if(mbim->port.in_ep->driver_data == NULL || mbim->port.out_ep->driver_data == NULL )

+        {

+            if (alt == 1)

+            {

+                if (!mbim->port.in_ep->desc || !mbim->port.out_ep->desc) 

+                {   

+                    if (config_ep_by_speed(cdev->gadget, f,

+                                   mbim->port.in_ep) ||

+                        config_ep_by_speed(cdev->gadget, f,

+                                   mbim->port.out_ep)) 

+                    {

+                        printk("\n%s, line:%u ,config error \n\n\n", __func__, __LINE__);

+                        mbim->port.in_ep->desc = NULL;

+                        mbim->port.out_ep->desc = NULL;

+                        goto fail;

+                    }

+                 }

+                //mbim->port.is_zlp_ok = false;

+                mbim->port.is_zlp_ok = true;

+                mbim->port.cdc_filter = DEFAULT_FILTER;

+                net = gether_connect(&mbim->port);

+                if (IS_ERR(net))

+                {

+                    return PTR_ERR(net);

+                }

+                //gether_mbim_uevent(1);

+                mbim_set_param_dev(mbim->config, net,&mbim->port.cdc_filter);

+            }

+        }

+

+	} else

+		goto fail;

+

+	return 0;

+fail:

+	return -EINVAL;

+}

+

+

+static int mbim_get_alt(struct usb_function *f, unsigned intf)

+{

+	struct f_mbim		*mbim = func_to_mbim(f);

+    printk("\n%s, line:%u  \n\n\n", __func__, __LINE__);

+	if (intf == mbim->ctrl_id)

+		return 0;

+	return mbim->port.in_ep->driver_data ? 1 : 0;

+}

+

+/* verify that the ethernet protocol is IPv4 or IPv6 */

+static bool is_ip_proto(__be16 proto)

+{

+	switch (proto) {

+	case htons(ETH_P_IP):

+	case htons(ETH_P_IPV6):

+		return true;

+	}

+	return false;

+}

+

+/* verify NTB header and return offset of first NDP, or negative error */
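+/*
+ * NTH16 layout (struct usb_cdc_ncm_nth16 in <linux/usb/cdc.h>):
+ * dwSignature ("NCMH"), wHeaderLength, wSequence, wBlockLength,
+ * wNdpIndex.  wNdpIndex is what this helper returns on success.
+ */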

+static int cdc_ncm_rx_verify_nth16(struct usb_request *req)

+{

+	struct usb_cdc_ncm_nth16 *nth16;

+    struct usb_cdc_ncm_nth16 tmp_nth16;

+	int len;

+	int ret = -EINVAL;

+    struct f_mbim *ctx = g_mbim ;

+    unsigned long mask = 3;

+    bool is_aligned = 0 ;

+

+	if (ctx == NULL)

+		goto error;

+    

+    if(req == NULL || req->buf== NULL || req->actual == 0)

+    {

+		USBSTACK_DBG( "[%s]:invalid input param \n",__func__);

+		goto error;

+    }

+    

+	if (req->actual < (sizeof(struct usb_cdc_ncm_nth16) +sizeof(struct usb_cdc_ncm_ndp16))) 

+    {

+		USBSTACK_DBG("[%s]:frame too short\n",__func__);

+		goto error;

+	}

+    //check whether the buffer is 4-byte aligned

+    is_aligned = ((unsigned long)(req->buf) & mask) ? 0: 1 ;

+    if(likely(is_aligned))

+    {

+        nth16 = (struct usb_cdc_ncm_nth16 *)req->buf;

+    }

+    else

+    {

+        memcpy(&tmp_nth16,req->buf,sizeof(struct usb_cdc_ncm_nth16)) ;

+        nth16 = &tmp_nth16;

+    }

+

+

+	if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) 

+    {

+		 USBSTACK_DBG( "[%s]:invalid NTH16 signature <%#010x>\n",__func__,le32_to_cpu(nth16->dwSignature));

+		goto error;

+	}

+

+	len = le16_to_cpu(nth16->wBlockLength);

+	if (len > ctx->port.fixed_out_len) 

+    {

+			 USBSTACK_DBG( "[%s]:unsupported NTB block length %u/%u\n", __func__,len,ctx->port.fixed_out_len);

+		goto error;

+	}

+

+	if ((ctx->rx_seq + 1) != le16_to_cpu(nth16->wSequence) &&

+	    (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&

+	    !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence)))

+	{

+		 USBSTACK_DBG( "[%s]:sequence number glitch prev=%d curr=%d\n",__func__,ctx->rx_seq, le16_to_cpu(nth16->wSequence));

+	}

+	ctx->rx_seq = le16_to_cpu(nth16->wSequence);

+

+	ret = le16_to_cpu(nth16->wNdpIndex);

+error:

+	return ret;

+}

+

+

+/* verify NDP header and return number of datagrams, or negative error */

+static int cdc_ncm_rx_verify_ndp16(struct usb_request *req, int ndpoffset )

+{

+	struct usb_cdc_ncm_ndp16 *ndp16;

+    struct usb_cdc_ncm_ndp16 ndp16_tmp;

+    unsigned long mask = 3;

+    bool is_aligned = 0 ;

+	int ret = -EINVAL;

+    if(req == NULL || req->buf== NULL || req->actual == 0 || ndpoffset == 0 || ndpoffset %4 != 0 )

+    {

+		USBSTACK_DBG( "[%s]:invalid input param ,ndpoffset = %d\n",__func__,ndpoffset);

+		goto error;

+    }

+	if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > req->actual) {

+		USBSTACK_DBG( "[%s]:invalid NDP offset  <%u>\n",__func__,ndpoffset);

+		goto error;

+	}

+    

+    is_aligned = ((unsigned long)req->buf & mask) ? 0: 1 ;

+    if(likely(is_aligned))

+    {

+        ndp16 = (struct usb_cdc_ncm_ndp16 *)(req->buf + ndpoffset);

+    }

+    else

+    {

+        memcpy(&ndp16_tmp,req->buf + ndpoffset,sizeof(struct usb_cdc_ncm_ndp16)) ;

+        ndp16 = &ndp16_tmp;

+    }

+

+	if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {

+		USBSTACK_DBG( "[%s]:invalid DPT16 length <%u>\n",__func__, le16_to_cpu(ndp16->wLength));

+		goto error;

+	}

+

+	ret = ((le16_to_cpu(ndp16->wLength) -

+					sizeof(struct usb_cdc_ncm_ndp16)) /

+					sizeof(struct usb_cdc_ncm_dpe16));

+	ret--; /* we process NDP entries except for the last one */

+

+	if ((sizeof(struct usb_cdc_ncm_ndp16) +

+	     ret * (sizeof(struct usb_cdc_ncm_dpe16))) > req->actual) {

+		USBSTACK_DBG( "[%s]:Invalid nframes = %d\n", __func__,ret);

+		ret = -EINVAL;

+	}

+

+error:

+	return ret;

+}

+/*

+*  type 0: get the input alignment, 1: get the output alignment

+*/

+int mbim_get_ntb_aligned_offset(unsigned long offset)

+{

+    if(g_mbim == NULL)

+    {

+        return -ENODEV ;

+    }

+    struct f_mbim *mbim = g_mbim ;

+

+    le16_to_cpu(mbim_ntb_parameters.wNdpInDivisor);

+    le16_to_cpu(mbim_ntb_parameters.wNdpInPayloadRemainder);

+    le16_to_cpu(mbim_ntb_parameters.wNdpInAlignment) ;

+    return 0;

+

+}

+

+#define MBIM_EXTRA_PADDING   1

+

+int mbim_get_nth16_and_ndp16_size(void)

+{

+    int cnt = get_vnic_multi_packet_num() + MBIM_EXTRA_PADDING + 1 ;

+    return sizeof(struct usb_cdc_ncm_nth16)+sizeof(struct usb_cdc_ncm_ndp16)+ sizeof(struct usb_cdc_ncm_dpe16)*cnt ;

+}
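+/*
+ * Pre-build a transmit NTB header in 'buf': an NTH16 immediately
+ * followed by a single IPS NDP16 whose signature carries the MBIM
+ * session id in its top byte.  Everything is written with
+ * put_unaligned_le*() because the buffer may not be aligned; the block
+ * and NDP lengths written here are placeholders that
+ * mbim_fill_ncm16_vary_head_info() updates as datagrams are appended.
+ */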

+int mbim_ncm16_and_ndp16_init(char * buf ,int len)

+{

+    struct f_mbim *mbim= g_mbim ;

+    int siz = mbim_get_nth16_and_ndp16_size() ;

+    if(buf== NULL ||  len < siz || mbim == NULL)

+    {

+        return -EINVAL ;

+    }

+

+    void *tmp ;

+    memset(buf,0,siz);

+    tmp = (void*)buf ;

+

+    put_unaligned_le32(mbim->parser_opts->nth_sign, tmp);

+    tmp += sizeof(mbim->parser_opts->nth_sign) ;

+    //len

+    put_unaligned_le16(mbim->parser_opts->nth_size, tmp);

+    tmp += sizeof(mbim->parser_opts->nth_size) ;

+    //seq

+    put_unaligned_le16(mbim->tx_seq++, tmp);

+    tmp += sizeof(mbim->tx_seq) ;

+    //block_len

+    

+    put_unaligned_le16(siz, tmp);

+    tmp += sizeof(mbim->parser_opts->block_length);

+    

+    //index

+    uint16_t  index = sizeof(struct usb_cdc_ncm_nth16) ;

+    put_unaligned_le16(index, tmp);

+    tmp += sizeof(index) ;

+

+    //ndp

+    //sig, changed to the IPS signature

+    uint8_t sess_id = atomic_read(&mbim->session_id) ;

+    u32 sig = USB_CDC_MBIM_NDP16_IPS_SIGN | (sess_id << 24) ;

+    put_unaligned_le32(sig, tmp);

+    tmp += sizeof(mbim->parser_opts->ndp_sign) ;

+    //len

+    put_unaligned_le16(sizeof(struct usb_cdc_ncm_ndp16)+sizeof(struct usb_cdc_ncm_dpe16), tmp);

+	return 0 ;

+}

+

+

+int mbim_fill_ncm16_vary_head_info(char *buf ,struct mbim_ncm_info *info)

+{

+    //update the nth size, sequence number and ndp index

+    if(buf== NULL || info == NULL)

+    {

+        return -EINVAL ;

+    }

+

+    char *nth16;

+    char *ndp16;

+    char *datagram_cur_buf ;

+    uint16_t ndp16_len;

+    char *ndp16_buf = NULL ;

+#if 0

+    unsigned long mask = 3;

+    bool is_aligned = ((unsigned long)buf & mask) ? 0: 1 ;

+    if(likely(is_aligned))

+    {

+        nth16 = (struct usb_cdc_ncm_nth16 *)(buf);

+        nth16->wBlockLength = cpu_to_le16(info->nth_block_len);

+        ndp16_buf = buf + cpu_to_le16(ndp16->wNextNdpIndex);

+        ndp16  =  (struct usb_cdc_ncm_ndp16 *)(ndp16_buf );

+        ndp16_len = cpu_to_le16(ndp16->wLength);

+        //compute the free datagram offset

+        datagram_cur_buf = (struct usb_cdc_ncm_dpe16  *)(ndp16_buf + ndp16_len - sizeof(struct usb_cdc_ncm_dpe16)) ;

+        datagram_cur_buf->wDatagramIndex =  cpu_to_le16(info->ndp_datagram_off);

+        datagram_cur_buf->wDatagramLength =  cpu_to_le16(info->ndp_datagram_len);

+        ndp16->wLength =  cpu_to_le16(ndp16_len+sizeof(struct usb_cdc_ncm_dpe16));

+    }

+    else

+ #endif       

+    {

+

+		char *ptmp = buf+offsetof(struct usb_cdc_ncm_nth16, wBlockLength);

+		//printk("\nwBlockLength :0x%02x-0x%02x\n",ptmp[0],ptmp[1]);

+        put_unaligned_le16(info->nth_block_len,ptmp) ;

+		//printk("[func]:%s,[line]:%d, val = %d \n",__func__,__LINE__,get_unaligned_le16(ptmp));

+		ptmp = buf+offsetof(struct usb_cdc_ncm_nth16, wNdpIndex);

+		//printk("\nwNdpIndex:0x%02x-0x%02x\n",ptmp[0],ptmp[1]);

+        ndp16_buf = buf + get_unaligned_le16(ptmp) ;

+        ndp16_len  = get_unaligned_le16(ndp16_buf + 4) ;

+		//printk("[func]:%s,[line]:%d, val = %d \n",__func__,__LINE__,ndp16_len);

+        datagram_cur_buf = ndp16_buf + ndp16_len - sizeof(struct usb_cdc_ncm_dpe16);

+        put_unaligned_le16(info->ndp_datagram_off,datagram_cur_buf+offsetof(struct usb_cdc_ncm_dpe16, wDatagramIndex));

+        put_unaligned_le16(info->ndp_datagram_len,datagram_cur_buf+offsetof(struct usb_cdc_ncm_dpe16, wDatagramLength));

+        put_unaligned_le16(ndp16_len+sizeof(struct usb_cdc_ncm_dpe16),ndp16_buf + 4);

+

+    }

+

+ 	return 0 ; 

+

+

+}

+

+int mbim_get_first_ndp16_offset(struct usb_request *req)

+{

+     if(req == NULL)

+    {

+        printk("[%s]:input param invalid \n",__func__)  ;

+        return -EINVAL ;

+    }

+    return cdc_ncm_rx_verify_nth16(req);

+}

+

+int mbim_get_trans_buffer_size(void)

+{

+    //

+    if (!g_mbim)

+    {

+        return -1;

+    }

+    return   max_t( u32 ,g_mbim->port.fixed_out_len, g_mbim->port.fixed_in_len);

+

+}

+

+

+/*

+ * get the packet offset

+*/

+int mbim_get_next_datagram_fragment(struct usb_request *req, int prev_ndp_off, int *cur_first_datagram, int *next_ndp)

+{

+    int nframes = 0;

+    int ret = -EINVAL ;

+    uint32_t	Signature;

+	int i = 0 ;

+	char* pbuf ;

+

+    if(req == NULL || req->buf == NULL || prev_ndp_off <= 0 || cur_first_datagram == NULL || next_ndp ==NULL )

+    {

+        printk("[%s]:input param invalid \n",__func__)  ;

+        goto ERR ;

+    }

+ 

+

+    nframes = cdc_ncm_rx_verify_ndp16(req, prev_ndp_off);

+    if(nframes < 0)

+    {

+        goto ERR ;

+    }

+    

+    Signature = get_unaligned_le32(req->buf + prev_ndp_off +offsetof(struct usb_cdc_ncm_ndp16,dwSignature)) ;

+    //printk("dwSignature = 0x%x",dwSignature) ;

+	switch (Signature & 0x00ffffff) {

+	case USB_CDC_MBIM_NDP16_IPS_SIGN:

+		break;

+	case USB_CDC_MBIM_NDP16_DSS_SIGN:

+        USBSTACK_DBG("[%s]: packet type is DSS",__func__) ;

+        goto ERR ;

+		break;

+	default:

+		USBSTACK_DBG( "[%s] : unsupported NDP signature <0x%08x>\n",__func__,Signature);

+		goto ERR;

+        break ;

+	}

+    *cur_first_datagram =  prev_ndp_off + sizeof(struct usb_cdc_ncm_ndp16) ;

+    *next_ndp = get_unaligned_le16(req->buf + prev_ndp_off+offsetof(struct usb_cdc_ncm_ndp16,wNextNdpIndex)) ;

+    return  nframes ;

+ERR:

+    return ret ;

+}

+

+int mbim_get_reverse_head_size()

+{

+    //for now, ignore the extra space a VLAN header would need

+    return ETH_HLEN ;

+}

+

+static struct sk_buff *mbim_wrap_ntb(struct gether *port,struct sk_buff *skb)

+{

+    //the virtual NIC created here registers no VLAN callbacks, so VLAN handling is not supported

+    

+    bool is_ip = false;

+	int i = 0 ;

+    skb_reset_mac_header(skb) ;

+    is_ip = is_ip_proto(eth_hdr(skb)->h_proto);

+    //printk("%s: mac frame is 802.3 ,start exe mbim_wrap_ntb ......\n",__func__) ;	

+    //<=1500 is an 802.3 frame, >=1536 is an Ethernet II frame

+    //MBIM only carries IP packets; non-IP packets are not forwarded

+    if(is_ip == false)

+    {

+         USBSTACK_DBG("%s: mac frame is 802.3 ,system is unsupported mac type ,mac type (be) is 0x%04x\n"

+            ,__func__,eth_hdr(skb)->h_proto) ;	 

+         dev_kfree_skb_any(skb);

+         return NULL;

+    }

+    skb_pull(skb, ETH_HLEN);

+	return skb;

+}

+

+

+static int mbim_unwrap_ntb(struct gether *port,struct sk_buff *skb,struct sk_buff_head *list)

+{

+    //enqueue onto the rx list

+    if(port == NULL || skb == NULL || list == NULL )

+    {

+        printk("[%s]:input param invalid \n",__func__)  ;

+        return -EINVAL ;

+    }

+

+    char *ipth = skb->data + ETH_HLEN ;

+	//printk("[%s] :before skb->lenth = %d!!!!",__func__,skb->len);

+    skb_put(skb, ETH_HLEN);

+	//printk("[%s] :after skb->lenth= %d!!!!",__func__,skb->len);

+    //add a MAC header; VLAN is not considered for now

+	__be16 proto = htons(ETH_P_802_3);

+    switch (*ipth & 0xf0)

+     {

+        case 0x40:

+            proto = htons(ETH_P_IP);

+			skb->protocol = proto ;	/* keep network byte order */

+            break;

+        case 0x60:

+            proto = htons(ETH_P_IPV6);

+			skb->protocol = proto ;	/* keep network byte order */

+            break;

+        default:

+            dev_kfree_skb_any(skb) ;

+            USBSTACK_DBG("[%s] :ip type is unknown:0x%x !!!!\n",__func__,proto);

+            return -1 ;

+   }

+

+    

+    //skb_push(skb, ETH_HLEN);

+    skb_reset_mac_header(skb) ;

+    //set the source/destination MAC addresses

+    struct ethhdr *ehdr =eth_hdr(skb) ; //(struct ethhdr *)skb->data;  // alignment

+	memcpy(ehdr->h_source,mbim_mac,sizeof(mbim_mac));

+    memcpy(ehdr->h_dest,port->ioport->net->dev_addr,sizeof(ehdr->h_dest));

+    skb_queue_tail(list,skb) ;

+    //set the IP protocol type

+    ehdr->h_proto = proto ;

+    return 0 ;

+}

+

+void mbim_release_pool(struct f_mbim *mbim)

+{

+	if(!mbim){

+		printk("mbim_release_pool, mbim is null\n");

+	}

+ 

+	 mbim_conn_pool_deinit(mbim);

+}

+

+static void mbim_disable(struct usb_function *f)

+{

+	struct f_mbim		*mbim = func_to_mbim(f);

+	struct usb_composite_dev *cdev = f->config->cdev;

+	//gether_mbim_uevent(0);

+     unsigned long flags ;

+	 struct mbim_pool_ctrl_s *pctrl = NULL ;

+    printk("\n%s, line:%u \n\n\n", __func__, __LINE__);

+	mbim->trans_flag = 0;

+	atomic_set(&mbim->notify_count, 0);

+	//USBSTACK_DBG(cdev, "mbim deactivated\n");

+	if (mbim->port.in_ep->driver_data)

+		gether_disconnect(&mbim->port);

+

+	if (mbim->notify->driver_data) {

+		usb_ep_disable(mbim->notify);

+		mbim->notify->driver_data = NULL;

+		mbim->notify->desc = NULL;

+	}

+	

+     spin_lock_irqsave(&mbim->ctrl_lock,flags);	

+	//check ctrl rx/tx list

+	if(!list_empty(&mbim->ctrl_tx_list)){

+		printk("mbim_disable, clean ctrl_tx_list, cnt:%u\n", atomic_read(&mbim->ctrl_tx_cnt));

+		while(!list_empty(&mbim->ctrl_tx_list)){

+		     pctrl = list_first_entry(&mbim->ctrl_tx_list, struct mbim_pool_ctrl_s, list) ;

+			list_del_init(&pctrl->list);

+			atomic_dec(&mbim->ctrl_tx_cnt) ;

+			pctrl->real_size = 0;

+			list_add_tail(&pctrl->list,&mbim->idle_list);

+			atomic_inc(&mbim->idle_cnt) ;			

+		}

+	}

+	if(!list_empty(&mbim->ctrl_rx_list)){

+		printk("mbim_disable, clean ctrl_rx_list, cnt:%u\n", atomic_read(&mbim->ctrl_rx_cnt));

+		while(!list_empty(&mbim->ctrl_rx_list)){

+		     pctrl = list_first_entry(&mbim->ctrl_rx_list, struct mbim_pool_ctrl_s, list) ;

+			list_del_init(&pctrl->list);

+			atomic_dec(&mbim->ctrl_rx_cnt) ;

+			pctrl->real_size = 0;

+			list_add_tail(&pctrl->list,&mbim->idle_list);

+			atomic_inc(&mbim->idle_cnt) ;			

+		}

+	}

+	 spin_unlock_irqrestore(&mbim->ctrl_lock,flags);		

+}

+

+static void mbim_open(struct gether *geth)

+{

+	printk("mbim_open\n");

+	struct f_mbim		*mbim = func_to_mbim(&geth->func);

+	spin_lock(&mbim->lock);

+	mbim->is_open = true;

+	//mbim_notify(mbim);

+	spin_unlock(&mbim->lock);

+}

+

+static void mbim_close(struct gether *geth)

+{

+	printk("mbim_close\n");

+	struct f_mbim		*mbim = func_to_mbim(&geth->func);

+    if(mbim->is_open == false)

+    {

+        printk("mbim net card has been closed !!! \n") ;

+        return  ;

+    }

+	//USBSTACK_DBG(mbim->port.func.config->cdev, "%s\n", __func__);

+

+	spin_lock(&mbim->lock);

+	mbim->is_open = false;

+	//mbim_notify(mbim);

+	spin_unlock(&mbim->lock);

+}

+

+int mbim_set_param_medium(u8 configNr, u32 medium)

+{

+	printk("$$$$mbim_set_param_medium\n");

+	//pr_debug("%s: %u %u\n", __func__, medium, speed);

+	//if (configNr >= RNDIS_MAX_CONFIGS) return -1;

+	mbim_per_dev_params[configNr].medium = medium;

+

+	return 0;

+}

+

+void mbim_set_host_mac(int configNr, const u8 *addr)

+{

+	mbim_per_dev_params[configNr].host_mac = addr;

+}

+

+

+static int 

+mbim_bind(struct usb_configuration *c, struct usb_function *f)

+{

+	printk("$$$$mbim_bind\n");

+	struct usb_composite_dev *cdev = c->cdev;

+	struct f_mbim		*mbim = func_to_mbim(f);

+	int			status;

+	struct usb_ep		*ep;

+

+	/* allocate instance-specific interface IDs */

+	status = usb_interface_id(c, f);

+	if (status < 0)

+		goto fail;

+	mbim->ctrl_id = status;

+	mbim_iad_descriptor.bFirstInterface = status;

+

+	mbim_comm_intf.bInterfaceNumber = status;

+	mbim_union_desc.bMasterInterface0 = status;

+

+	status = usb_interface_id(c, f);

+	if (status < 0)

+		goto fail;

+	mbim->data_id = status;

+

+	mbim_data_intf.bInterfaceNumber = status;

+	mbim_data_intf1.bInterfaceNumber = status;

+	mbim_union_desc.bSlaveInterface0 = status;

+

+	status = -ENODEV;

+

+	/* allocate instance-specific endpoints */

+	ep = usb_ep_autoconfig(cdev->gadget, &mbim_in_desc);

+	if (!ep)

+		goto fail;

+	mbim->port.in_ep = ep;

+	printk("$$$$mbim->port.in_ep->desc = %p\n",mbim->port.in_ep->desc);

+	ep->driver_data = cdev;	/* claim */

+

+	ep = usb_ep_autoconfig(cdev->gadget, &mbim_out_desc);

+	if (!ep)

+		goto fail;

+	mbim->port.out_ep = ep;

+	printk("$$$$mbim->port.out_ep->desc = %p\n",mbim->port.out_ep->desc);

+	ep->driver_data = cdev;	/* claim */

+

+	ep = usb_ep_autoconfig(cdev->gadget, &mbim_notify_desc);

+	if (!ep)

+		goto fail;

+	mbim->notify = ep;

+	printk("$$$$mbim->notify->desc = %p\n",mbim->notify->desc);

+	ep->driver_data =   cdev;	/* claim */

+

+

+	status = -ENOMEM;

+

+	/* allocate notification request and buffer */

+	mbim->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);

+	if (!mbim->notify_req)

+		goto fail;

+	mbim->notify_req->buf = kmalloc(8, GFP_KERNEL);

+	if (!mbim->notify_req->buf)

+		goto fail;

+	mbim->notify_req->length = 8;//MBIM_STATUS_BYTECOUNT;

+	mbim->notify_req->context = mbim;

+	mbim->notify_req->complete = mbim_response_complete;//mbim_notify_complete;

+#if 1

+	/* copy descriptors, and track endpoint copies */

+	f->descriptors = usb_copy_descriptors(mbim_descriptor_function);

+	if (!f->descriptors)

+		goto fail;

+#endif

+	if (gadget_is_dualspeed(c->cdev->gadget)) {

+		mbim_data_in.bEndpointAddress =

+				mbim_in_desc.bEndpointAddress;

+		mbim_data_out.bEndpointAddress =

+				mbim_out_desc.bEndpointAddress;

+		mbim_data_notify.bEndpointAddress =

+				mbim_notify_desc.bEndpointAddress;

+

+		/* copy descriptors, and track endpoint copies */

+		f->hs_descriptors = usb_copy_descriptors(mbim_data_descriptor_function);

+		if (!f->hs_descriptors)

+			goto fail;

+	}

+	

+

+	/*

+	 * NOTE:  all that is done without knowing or caring about

+	 * the network link ... which is unavailable to this code

+	 * until we're activated via set_alt().

+	 */

+

+	mbim->port.open = mbim_open;

+	mbim->port.close = mbim_close;

+	

+	status = mbim_register(mbim_response_available, mbim);

+	if (status < 0)

+		goto fail;

+

+	mbim->config = status;

+

+	//mbim_set_param_medium(mbim->config, NDIS_MEDIUM_802_3);

+	mbim_set_host_mac(mbim->config, mbim->ethaddr);

+

+	//

+	multi_packet_handle_init(&mbim->port, cdev->gadget);

+#if MULTIPACKET_BUF_ALLOC

+	mbim->state = 1;

+#endif     

+

+	//USBSTACK_DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",

+	//		gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",

+	//		mbim->port.in_ep->name, mbim->port.out_ep->name,

+	//		mbim->notify->name);

+	return 0;

+

+fail:

+     printk("\n\n[file]: %s ,[][func]: %s ,[line] = %d \n\n",__FILE__,__func__,__LINE__);

+	if (f->descriptors)

+		usb_free_descriptors(f->descriptors);

+

+	if (mbim->notify_req) {

+		kfree(mbim->notify_req->buf);

+		usb_ep_free_request(mbim->notify, mbim->notify_req);

+	}

+

+	/* we might as well release our claims on endpoints */

+	if (mbim->notify)

+		mbim->notify->driver_data = NULL;

+	if (mbim->port.out_ep)

+		mbim->port.out_ep->driver_data = NULL;

+	if (mbim->port.in_ep)

+		mbim->port.in_ep->driver_data = NULL;

+

+	USB_ASSERT(cdev, "%s: can't bind, err %d\n", f->name, status);

+

+	return status;

+}

+

+static void mbim_unbind(struct usb_configuration *c, struct usb_function *f)

+{

+	struct f_mbim		*mbim = func_to_mbim(f);

+	mbim->trans_flag = 0;

+

+	mbim_release_pool(mbim);

+	

+    multi_packet_handle_exit();

+    mbim_deregister(mbim->config);

+	//USBSTACK_DBG(c->cdev, "mbim unbind\n");

+

+	if (gadget_is_dualspeed(c->cdev->gadget))

+		usb_free_descriptors(f->hs_descriptors);

+	usb_free_descriptors(f->descriptors);

+

+

+	kfree(mbim->notify_req->buf);

+	mbim->notify_req->buf = NULL;

+	usb_ep_free_request(mbim->notify, mbim->notify_req);

+

+#if MULTIPACKET_BUF_ALLOC

+	mbim->state = 0;

+#endif 

+	mbim_string_defs[1].s = NULL;

+	//kfree(mbim);

+}

+

+/**

+ * mbim_bind_config - add CDC Network link to a configuration

+ * @c: the configuration to support the network link

+ * @ethaddr: a buffer in which the ethernet address of the host

+ *	side of the link was recorded

+ * Context: single threaded during gadget setup

+ *

+ * Returns zero on success, else negative errno.

+ *

+ * Caller must have called @gether_setup().  Caller is also responsible

+ * for calling @gether_cleanup() before module unload.

+ */

+ int mbim_init(void)

+{

+	u8 i;

+

+

+	for (i = 0; i < MBIM_MAX_CONFIGS; i++) {

+

+

+		mbim_per_dev_params[i].confignr = i;

+		mbim_per_dev_params[i].used = 0;

+		mbim_per_dev_params[i].state = 0;

+		mbim_per_dev_params[i].media_state

+				= 0;

+		INIT_LIST_HEAD(&(mbim_per_dev_params[i].resp_queue));

+	}

+

+	//rndis_initialized = true;

+	return 0;

+}

+ 

+#define Z_IPV4_FLAG           0x40

+#define Z_IPV6_FLAG           0x60
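+/*
+ * Loopback-test helper: despite the name it swaps the IP source and
+ * destination addresses in place (bytes 12..19 of an IPv4 header,
+ * bytes 8..39 of an IPv6 header) so a received packet can be bounced
+ * straight back to the host.
+ */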

+ void mbim_mac_exchange(u8 *buff)

+ {

+	 

+	 //mbim_ReversIpAddr(inBufData->node.pNodeBuf);

+	 u8 tempbuff[16] = {0};

+	 BUG_ON(buff == NULL);

+	 //zOss_Printf(1, PRINT_LEVEL_NORMAL, "\r\n mbim_TransIpLoopback eeeeeeeeeeeeeeeeeeee\r\n");

+	 if(((*buff) & 0xF0) == Z_IPV4_FLAG)

+	 {

+	 	//printk("mbim_mac_exchange v4\r\n");

+		 memcpy(tempbuff, buff+12, 4);

+		 memcpy(buff+12, buff+16, 4);

+		 memcpy(buff+16, tempbuff, 4);

+	 }

+	 else if(((*buff) & 0xF0) == Z_IPV6_FLAG)

+	 {

+		// printk("mbim_mac_exchange v6\r\n");

+		 memcpy(tempbuff, buff+8, 16);

+		 memcpy(buff+8, buff+24, 16);

+		 memcpy(buff+24, tempbuff, 16);

+	 }

+	 else

+	 {

+		 panic("mac_exchange unknown type\n");

+	 }

+ }

+int loop_rcv_packet=0;

+ int loop_rcv_byte=0;

+ 

+ struct sk_buff  *t_skb = NULL;

+ void mbim_lp_test_th(void *ptr)

+ {

+ 

+	unsigned long flags;

+	int	retval = -ENOMEM;

+	int wait_event_ret = 0;

+	struct usb_request	*req = NULL;

+	

+    struct list_head  *rx_list_head ;	

+    struct list_head  *rx_node ;

+    struct mbim_pool_data_s * data_chan;

+//       struct gether *port = container_of(ep, struct gether, out_ep);

+//	struct eth_dev	*dev = container_of(port, struct eth_dev, port_usb);

+	 struct sk_buff  *skb = NULL;

+

+	 struct f_mbim	 *mbim = (struct f_mbim	*)ptr ;

+	 

+	while(!kthread_should_stop())

+	{

+		wait_event_ret = wait_event_interruptible(mbim->lp_wait,  

+			atomic_read(&mbim->data_rx_cnt)||kthread_should_stop());

+		

+		if(kthread_should_stop())

+		{

+			printk("lp test thread stop");

+			break;

+		}

+

+		if(atomic_read(&mbim->lb_flag) == 0)

+			continue;

+	//printk("lp_test_th, datalock:%x, cnt:%d\n", &mbim->data_lock, atomic_read(&mbim->data_rx_cnt));	

+ spin_lock_irqsave(&mbim->data_lock,flags);

+

+    if(list_empty(&mbim->data_rx_list))

+    {

+        atomic_set(&mbim->data_rx_cnt,0) ;

+        spin_unlock_irqrestore(&mbim->data_lock,flags);

+        printk("[func]:%s,[line]:%d ,data rx list is empty \n",__func__,__LINE__) ;

+       continue ;

+    }

+	

+	while(!list_empty(&mbim->data_rx_list)){			

+		rx_list_head = &mbim->data_rx_list;

+		

+		if((rx_list_head == NULL) || (rx_list_head->next == rx_list_head)){

+			spin_unlock_irqrestore(&mbim->data_lock,flags);

+			break;

+		}

+		data_chan =  list_first_entry(rx_list_head, struct mbim_pool_data_s, list) ;

+		list_del_init(&data_chan->list) ;

+		atomic_dec(&mbim->data_rx_cnt);

+		spin_unlock_irqrestore(&mbim->data_lock,flags);

+

+		skb = (struct sk_buff	*)(data_chan->pdata) ;

+		if(skb == NULL){		

+			printk("mbim_data_read, skb is null\r\n");

+			spin_lock_irqsave(&mbim->data_lock,flags);

+			

+			continue;

+		}	

+		t_skb = skb;

+		skb_pull(skb, ETH_HLEN);

+		wmb();

+		mbim_mac_exchange(skb->data);

+

+		if(atomic_read(&g_mbim->lb_flag) == 0)

+			break;		

+		if(mbim_loop_test_xmit(mbim->port.ioport,skb)) 

+		{

+			printk( "%s :send skb failed \n",__func__);

+		}

+		spin_lock_irqsave(&mbim->data_lock,flags);

+		list_add_tail(&data_chan->list,&mbim->data_ilde_list);

+		atomic_inc(&mbim->data_idle_cnt) ;		

+		

+	}//while rx_list_head

+	spin_unlock_irqrestore(&mbim->data_lock,flags);

+	

+	}

+ 

+ }

+

+ int  mbim_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])

+{

+	printk("mbim_bind_config\n");

+	struct f_mbim	*mbim ;

+	int		status;

+    if(g_mbim == NULL)

+    {

+         printk("[func]:%s,[line]:%d ,no function mbim  \n",__func__,__LINE__) ;

+        return -ENODEV;

+    }

+    mbim = g_mbim ;

+    if(mbim_conn_pool_init(mbim))

+    {

+        printk("[func]:%s,[line]:%d ,init pool failed\n",__func__,__LINE__) ;

+        return -ENOMEM ;

+    }

+

+//	if (!can_support_ecm(c->cdev->gadget) || !ethaddr)

+//		return -EINVAL;

+	mbim_init();

+	/* maybe allocate device-global string IDs */

+	if (mbim_string_defs[0].id == 0) {

+

+		/* control interface label */

+		status = usb_string_id(c->cdev);

+		if (status < 0)

+			return status;

+		mbim_string_defs[STRING_CTRL_IDX].id = status;

+		mbim_comm_intf.iInterface = status;

+

+		/* data interface label */

+		status = usb_string_id(c->cdev);

+		if (status < 0)

+			return status;

+		mbim_string_defs[STRING_DATA_IDX].id = status;

+		mbim_data_intf.iInterface = status;

+		mbim_data_intf1.iInterface = status;

+

+		/* MAC address */

+		status = usb_string_id(c->cdev);

+		if (status < 0)

+			return status;

+		mbim_string_defs[STRING_MAC_IDX].id = status;

+		ecm_desc.iMACAddress = status;

+

+		/* IAD */

+		status = usb_string_id(c->cdev);

+		if (status < 0)

+			return status;

+		mbim_string_defs[STRING_IAD_IDX].id = status;

+		mbim_iad_descriptor.iFunction = status;

+	}

+

+	/* export host's Ethernet address in CDC format */

+	snprintf(mbim->ethaddr, sizeof mbim->ethaddr,

+		"%02X%02X%02X%02X%02X%02X",

+		ethaddr[0], ethaddr[1], ethaddr[2],

+		ethaddr[3], ethaddr[4], ethaddr[5]);

+	mbim_string_defs[1].s = mbim->ethaddr;

+

+	spin_lock_init(&mbim->lock);

+	mbim_reset_values(mbim);

+    //mbim->port.is_fixed = true;

+	mbim->port.func.name = "mbim";

+	mbim->port.func.strings = mbim_strings;

+	/* descriptors are per-instance copies */

+	mbim->port.func.bind = mbim_bind;

+	mbim->port.func.unbind = mbim_unbind;

+	mbim->port.func.set_alt = mbim_set_alt;

+	mbim->port.func.get_alt = mbim_get_alt;

+	mbim->port.func.setup = mbim_setup;

+	mbim->port.func.disable = mbim_disable;

+    

+#ifdef CONFIG_PM

+    mbim->port.func.suspend = mbim_suspend;

+    mbim->port.func.resume = mbim_resume;

+	atomic_set(&mbim->port.wake_state, 0);

+#endif

+

+	mbim->port.wrap = mbim_wrap_ntb;

+	mbim->port.unwrap = mbim_unwrap_ntb;

+	printk("$$$$MBIM usb_add_function\n");

+	status = usb_add_function(c, &mbim->port.func);

+	return status;

+}
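+/*
+ * Minimal usage sketch -- illustrative only; the composite driver that
+ * actually wires this function in is not part of this file, and the
+ * names below (my_config_bind, host_mac) are hypothetical:
+ *
+ *	static u8 host_mac[ETH_ALEN];	// typically filled by gether_setup()
+ *
+ *	static int my_config_bind(struct usb_configuration *c)
+ *	{
+ *		return mbim_bind_config(c, host_mac);
+ *	}
+ */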

+

+

+/*==========================================================================*/

+/*==========================================================================*/

+

+ int mbim_loop_test_xmit( struct eth_dev      *dev ,struct sk_buff* skb) ;

+ int mbim_switch_network_mode(struct gether *link ,int type);

+ 

+ static int mbim_ctrl_open(struct inode *ip, struct file *fp)

+ {

+

+    if (!g_mbim)

+    {

+        return -ENODEV;

+    }

+    printk("\n\n [func]:%s,[line]:%d  \n\n",__func__,__LINE__) ;

+    fp->private_data = (void *)g_mbim ;

+    return 0;

+ }

+ 

+ static int mbim_ctrl_release(struct inode *ip, struct file *fp)

+ {

+     printk("\n\n [func]:%s,[line]:%d  \n\n",__func__,__LINE__) ;

+     return 0;

+ }

+
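+/*
+ * Character-device side of the control channel: userspace reads
+ * host->device encapsulated commands off ctrl_rx_list here, and writes
+ * responses/indications which are queued on ctrl_tx_list and announced
+ * to the host via mbim_response_available().
+ */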

+static ssize_t mbim_ctrl_read(struct file *fp, char __user *buf,size_t count, loff_t *pos)

+{

+

+	int rn_cnt = 0 ;

+	unsigned long flags;

+	struct mbim_pool_ctrl_s *pctrl = NULL ;

+	if (count < MBIM_MAX_CTRL_MSG) 

+	{

+	  printk("[func]:%s,[line]:%d ,rx list is empty \n",__func__,__LINE__) ;

+	  return -EINVAL;

+	}

+

+	struct f_mbim* mbim_dev = (struct f_mbim *)fp->private_data;

+

+	if(fp->f_flags& O_NONBLOCK  && atomic_read(&mbim_dev->ctrl_rx_cnt) == 0)

+	{

+	 return -EAGAIN ;

+	}

+	if(wait_event_interruptible(mbim_dev->ctrl_read_wq, atomic_read(&mbim_dev->ctrl_rx_cnt)))

+	 return -ERESTARTSYS ;

+

+

+	//take an entry off the list and report it up to userspace

+	spin_lock_irqsave(&mbim_dev->ctrl_lock,flags);

+	if(list_empty(&mbim_dev->ctrl_rx_list))

+	{

+		atomic_set(&mbim_dev->ctrl_rx_cnt,0) ;

+		spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags);

+		printk("[func]:%s,[line]:%d ,rx list is empty ,ctrl_rx_cnt = %d \n",__func__,__LINE__,atomic_read(&mbim_dev->ctrl_rx_cnt)) ;

+		return -EAGAIN ;

+	}

+	pctrl = list_first_entry(&mbim_dev->ctrl_rx_list, struct mbim_pool_ctrl_s, list) ;

+	list_del_init(&pctrl->list);

+	atomic_dec(&mbim_dev->ctrl_rx_cnt) ;

+	spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags);

+

+	if(copy_to_user(buf, pctrl->buf,pctrl->real_size)) 

+	{

+		spin_lock_irqsave(&mbim_dev->ctrl_lock,flags);

+		list_add(&pctrl->list,&mbim_dev->ctrl_rx_list);

+		atomic_inc(&mbim_dev->ctrl_rx_cnt) ;

+		spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags);

+		printk("\n\n##########[func]:%s,[line]:%d ,copy_to_user failed \n\n\n",__func__,__LINE__) ;

+		return -EFAULT ;

+	}

+	rn_cnt = pctrl->real_size ;

+	drv_mbim_rx_static(pctrl);

+	pctrl->real_size = 0 ;

+	spin_lock_irqsave(&mbim_dev->ctrl_lock,flags);

+	//list_del_init(&pctrl->list);

+	//atomic_dec(&mbim_dev->ctrl_rx_cnt) ;

+	list_add_tail(&pctrl->list,&mbim_dev->idle_list);

+	atomic_inc(&mbim_dev->idle_cnt) ;

+	spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags);

+

+	return  rn_cnt;

+}

+

+static ssize_t mbim_ctrl_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos)

+{

+	int rn_cnt = 0 ;

+	unsigned long flags ;

+	int status = 0;

+	struct f_mbim* mbim_dev = (struct f_mbim *)fp->private_data;

+	struct mbim_pool_ctrl_s *pctrl = NULL ;

+	

+	if (count > MBIM_MAX_CTRL_MSG || count <= 12) 

+	{

+		printk("[func]:%s,[line]:%d ,param invalid \n",__func__,__LINE__) ;

+		return -EINVAL;

+	}

+

+	if(mbim_dev->trans_flag == 0){

+		USBSTACK_DBG("ctrl_write, ep not enabled, send nothing\n");

+		printk("ctrl_write, ep not enabled, send nothing\n");

+		return count;

+	}

+	u8 cid_buf_head[12] ;

+	if(copy_from_user(cid_buf_head , buf,12))

+	{

+		 printk("[func]:%s,[line]:%d , copy data from user is error \n",__func__,__LINE__) ;

+		 return -EFAULT ;

+	}

+	if(mbim_cid_msg_filter(cid_buf_head, 12, 0))

+	{

+		 printk("[func]:%s,[line]:%d , cid msg is intercepted \n",__func__,__LINE__) ;

+		 return count;

+	}

+

+	if(fp->f_flags& O_NONBLOCK && atomic_read(&mbim_dev->idle_cnt) == 0)

+	{

+		 printk("[func]:%s,[line]:%d ,idle list is empty \n",__func__,__LINE__) ;

+		 return -EAGAIN ;

+	}

+

+	if(wait_event_interruptible(mbim_dev->ctrl_write_wq, atomic_read(&mbim_dev->idle_cnt)))

+	{

+		 return -ERESTARTSYS ;

+	}

+	if(mbim_dev->trans_flag == 0){

+		USBSTACK_DBG("ctrl_write, ep not enabled, send nothing\n");

+		printk("ctrl_write, ep not enabled, send nothing\n");

+		return count;

+	}

+	

+	spin_lock_irqsave(&mbim_dev->ctrl_lock,flags);

+	if(list_empty(&mbim_dev->idle_list))

+	{

+		atomic_set(&mbim_dev->idle_cnt,0) ;

+		spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags);

+		printk("[func]:%s,[line]:%d ,idle list is empty \n",__func__,__LINE__) ;

+		return -EAGAIN ;

+

+	}

+	pctrl = list_first_entry(&mbim_dev->idle_list, struct mbim_pool_ctrl_s, list) ;

+	list_del_init(&pctrl->list);

+	atomic_dec(&mbim_dev->idle_cnt) ;

+	spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags);

+

+	if(copy_from_user(pctrl->buf,buf,count) )

+	{

+		spin_lock_irqsave(&mbim_dev->ctrl_lock,flags);

+		list_add_tail(&pctrl->list,&mbim_dev->idle_list);

+		atomic_inc(&mbim_dev->idle_cnt) ;

+		spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags);

+		printk("[func]:%s,[line]:%d ,copy_from_user failed \n",__func__,__LINE__) ;

+		return -EFAULT ;

+	}

+	pctrl->real_size = count ;

+	spin_lock_irqsave(&mbim_dev->ctrl_lock,flags);

+	//list_del_init(&pctrl->list);

+	//atomic_dec(&mbim_dev->idle_cnt);

+	list_add_tail(&pctrl->list, &mbim_dev->ctrl_tx_list);

+	atomic_inc(&mbim_dev->ctrl_tx_cnt) ;

+	drv_mbim_tx_static(pctrl);

+	spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags);

+	//now first check is suspend

+ 	

+#ifdef CONFIG_PM

+	struct gether *port = &mbim_dev->port; 	

+	if(port && port->suspend_state && atomic_read(&port->wake_state) == 0){

+		if(port->func.config && port->func.config->cdev && port->func.config->cdev->gadget) {

+			printk("-----mbim_ctrl_write, call usb_gadget_wakeup\n");

+			if(mbim_dev->trans_flag == 0){

+				//now check trans_flag, maybe had been disabled

+				USBSTACK_DBG("ctrl_write, trans_flag is 0, don't wakeup\n");

+				printk("ctrl_write,  trans_flag is 0, don't wakeup\n");

+				return count;

+			}

+			atomic_set(&port->wake_state, 1);

+		    usb_gadget_wakeup(port->func.config->cdev->gadget);

+			//do{

+				//msleep(1);

+			//}while(port->suspend_state==1);

+			atomic_set(&port->wake_state, 0);

+		}

+	} 

+#endif

+

+	//notify host to get data !!!

+	if(mbim_response_available(mbim_dev)){

+		printk("response_available delete tx\n");

+		spin_lock_irqsave(&mbim_dev->ctrl_lock,flags);

+		//maybe ctrl_tx_list has been clean up at this time while mbim_disable

+		if(!list_empty(&mbim_dev->ctrl_tx_list)){

+			list_del_init(&pctrl->list);

+			atomic_dec(&mbim_dev->ctrl_tx_cnt);

+			list_add_tail(&pctrl->list, &mbim_dev->idle_list);

+			atomic_inc(&mbim_dev->idle_cnt) ;

+		}

+		spin_unlock_irqrestore(&mbim_dev->ctrl_lock,flags);		

+	}

+	//printk("\n\n########%s, %u ,end \n\n", __func__, __LINE__ );

+	return count;

+}

+

+int mbim_get_work_mode(void)

+{

+	if(g_mbim == NULL)		

+		return -ENODEV ;

+	

+	struct gether *link = &g_mbim->port;

+ 	struct eth_dev *dev = link->ioport;

+	return  atomic_read(&dev->work_mode);

+

+}

+
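+/*
+ * ioctl handler for the MBIM control device: connect/disconnect the network
+ * path, set session ids, configure loopback testing and query status; the
+ * remaining VNIC commands are accepted but currently have no effect.
+ */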

+static long mbim_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)

+{

+    if(_IOC_TYPE(cmd) != MBIM_MAGIC)

+    {

+        return -ENOTTY ;

+    }

+    /* command handling: network connect/disconnect, session ids, loopback and status queries */

+    if(g_mbim == NULL)

+    {

+        printk("[func]:%s,[line]:%d ,mbim function  do not  init \n",__func__,__LINE__) ;

+        return -ENODEV ;

+    }

+    switch(cmd)

+    {

+    case IOCTL_VNIC_SET_BLOCKTIME:

+    {

+        break;

+    }

+

+    case IOCTL_VNIC_FREE_READ_BUF:

+    {

+        break;

+    }

+

+    case IOCTL_VNIC_GET_WRITE_BUF:

+    {

+

+        break;

+    }

+

+    case IOCTL_VNIC_CONNECT:

+    {

+        /* connect the network */

+        if(atomic_read(&g_mbim->netlink_path_status ) == 0 )

+        {

+            atomic_inc(&g_mbim->netlink_path_status);

+            /* enable bulk forwarding */

+        }

+        break;

+    }

+

+    case IOCTL_VNIC_DISCONNECT:

+    {

+        /* disconnect the network */

+        if(atomic_read(&g_mbim->netlink_path_status ) == 1 )

+        {

+            atomic_dec(&g_mbim->netlink_path_status);

+            /* stop data forwarding */

+        }

+

+        break;

+    }

+

+    case IOCTL_VNIC_REG_CBK_XFER_STATISTICS:

+    {

+

+        break;

+    }

+

+    case IOCTL_VNIC_REG_CBK_NIC_DISABLE:

+    {

+

+        break;

+    }

+

+    case IOCTL_VNIC_FREE_UNREAD_BUF:

+    {

+        break;

+    }

+    case IOCTL_VNIC_GET_USB_STATUS:

+    {

+        break;

+    }

+    case IOCTL_VNIC_GET_CID_MAXTRANSFER_SIZE :

+    {

+        /* return the maximum CID fragment size */

+        unsigned int val = MBIM_MAX_CTRL_MSG ;

+        if(copy_to_user((unsigned __user *)arg ,&val,sizeof(val))) 

+        {

+            return -EFAULT ;

+        }

+        break ;

+    }

+    case IOCTL_VNIC_SET_CID_SESSIONID:  /* set the session_id */

+    {

+         unsigned int sess_id ;

+        if(copy_from_user(&sess_id, (const void __user *)arg, sizeof(sess_id)))
+        {
+            return -EFAULT ;
+        }
+        printk("[func]:%s ,[line]:%d ,sess_id = %u\n",__func__,__LINE__,sess_id);
+        if(sess_id > 255)
+        {
+            return -EINVAL ;
+        }
+        atomic_set(&g_mbim->session_id, sess_id) ;

+        break ;

+    }

+    case IOCTL_VNIC_SET_CID_DSSSESSIONID:  /* set the DSS session_id */

+    {

+        unsigned int sess_id ;

+        if(copy_from_user(&sess_id, (const void __user *)arg, sizeof(sess_id)))
+        {
+            return -EFAULT ;
+        }
+        printk("[func]:%s ,[line]:%d ,sess_id = %u\n",__func__,__LINE__,sess_id);
+        if(sess_id > 255)
+        {
+            return -EINVAL ;
+        }
+        atomic_set(&g_mbim->dss_session_id, sess_id) ;

+        break ;

+    }

+    case IOCTL_VNIC_SET_NET_LOOP_TEST:

+    {

+        /* configure the loopback test */

+        

+       #if 0 

+        if(g_mbim->is_open != 0 )

+        {

+            printk("[func]: %s  ,[line]:%d ,please turn off net device \n",__func__,__LINE__) ;

+            return -1 ;

+        }

+		#endif

+        /* configure loopback test emulation */

+         unsigned int val ;

+        if(copy_from_user(&val, (const void __user *)arg, sizeof(val)))

+        {

+            return -EFAULT ;

+        }

+        printk("[func]:%s ,[line]:%d ,IOCTL_VNIC_SET_NET_LOOP_TEST,cmd:%d \n",__func__,__LINE__, val);

+

+			

+        if(mbim_switch_network_mode(&g_mbim->port ,val))

+        {

+            return -ENODEV ;

+        }

+    

+       // msleep(20) ;

+        break ;

+    }

+    case IOCTL_VNIC_GET_NET_STATUS :

+    {

+        /* report the network status */

+        unsigned int val = g_mbim->is_open ;

+        if(copy_to_user((unsigned __user *)arg ,&val,sizeof(val))) 

+        {

+            return -EFAULT ;

+        }

+        break;

+    }

+    default: 

+    {

+        return -ENOTTY ;

+    }

+    

+    }

+    return 0 ;

+}

+ 

+

+
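+/*
+ * Opening the data device starts the loopback test thread, allocates the
+ * multi-packet buffers if needed, primes the rx queue and opens the link.
+ */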

+static int mbim_data_open(struct inode *ip, struct file *fp)

+{

+ 	//mbim_per_dev_params[0].dev

+ 	int rtv = 0;

+	printk("----mbim_data_open enter, g_mbim:%p\r\n", g_mbim);

+//	panic("mbim_data_open enter\n");

+     if (!g_mbim)

+     {

+         return -ENODEV;

+     }

+	 

+	 printk("----mbim_data_open enter\r\n");

+	g_mbim->lp_thread = kthread_run(mbim_lp_test_th, (unsigned long)g_mbim, "mbim_lptest_thread");

+	BUG_ON(IS_ERR(g_mbim->lp_thread));	

+	

+	 atomic_set(&g_mbim->lb_flag,1);

+     fp->private_data = (void *)g_mbim ;

+	struct gether *link = &g_mbim->port;

+ 	struct eth_dev *dev = link->ioport;

+	if(g_mbim->state != 2)

+    {	

+		rtv = multi_packet_buf_alloc();

+		if(rtv < 0)

+        {

+			printk("eth_open, net is mbim and req alloc faild with no memory\n");

+			return rtv;

+		}

+		g_mbim->state = 2;

+	}	

+	multi_packet_activate();

+

+	/* fill the rx queue */

+	rx_fill(dev, GFP_KERNEL);

+

+	/* and open the tx floodgates */

+	atomic_set(&dev->tx_qlen, 0);	 

+	

+	if (link->open)

+		link->open(link);

+    return 0 ;

+ }

+ 

+ static int mbim_data_release(struct inode *ip, struct file *fp)

+ { 

+	 unsigned long	 flags;

+	 printk("----mbim_data_release enter,g_mbim:%p\r\n",g_mbim);

+	// panic("mbim_data_release enter\n");

+     fp->private_data = NULL;

+     if (!g_mbim)

+     {

+         return -ENODEV;

+     }

+	 atomic_set(&g_mbim->lb_flag,0);

+

+	kthread_stop(g_mbim->lp_thread);	 

+	// wake_up(&g_mbim->data_read_wq);

+	// wake_up(&g_mbim->data_write_wq);

+	 printk("----mbim_data_release enter,wakeup rx/tx queue 2 end\r\n");

+

+	struct gether *link = &g_mbim->port;

+	 struct eth_dev *dev = link->ioport;

+

+	 spin_lock_irqsave(&dev->lock, flags);

+	 

+	if (link->close)

+		link->close(link);

+	printk("----mbim_data_release enter,closed link\r\n");

+	

+	u_ether_tx_vnic_packet_list();

+	

+	printk("----mbim_data_release enter,clean tx\r\n");

+	spin_unlock_irqrestore(&dev->lock, flags);

+	

+     return 0;

+ }

+

+

+
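+ /*
+  * Read one received packet: dequeue an skb from data_rx_list, strip the
+  * Ethernet header, copy the payload to userspace and recycle the pool
+  * entry onto the data idle list.
+  */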

+ static ssize_t mbim_data_read(struct file *fp, char __user *buf,size_t count, loff_t *pos)

+ {

+     int rn_cnt = 0 ;

+     unsigned long flags;

+     struct f_mbim* mbim_dev = (struct f_mbim *)fp->private_data;

+     long copy_ret = 0 ;

+    //  printk("\n\n----[func]:%s,[line]:%d .buf:%x\n\n",__func__,__LINE__,buf) ;

+     if(mbim_dev == NULL ||  mbim_dev->port.ioport == NULL )

+     {

+        printk("[func]:%s,[line]:%d no dev \n",__func__,__LINE__) ;

+        return -ENODEV ;

+

+     }

+     if(count < mbim_dev->port.ioport->net->mtu)

+     {

+         printk("[func]:%s,[line]:%d param inval \n",__func__,__LINE__) ;

+         return -EINVAL ;

+     }

+     

+     if(atomic_read(&mbim_dev->data_rx_cnt) == 0)

+     {

+        if (fp->f_flags & O_NONBLOCK)

+        {

+			printk("[func]:%s,[line]:%d f_flags& O_NONBLOCK return \n",__func__,__LINE__) ;

+            return -EAGAIN ;

+        }

+		

+		//printk("\r\n-----[func]:%s,[line]:%d now wait \r\n",__func__,__LINE__) ;

+        if(wait_event_interruptible(mbim_dev->data_read_wq, atomic_read(&mbim_dev->data_rx_cnt))){

+			printk("[func]:%s,[line]:%d afterwait \n",__func__,__LINE__) ;

+            return -ERESTARTSYS ;

+        }

+     }

+	// printk("mbim_data_read,wakeup \r\n") ;

+	 

+    if(atomic_read(&mbim_dev->lb_flag) == 0){

+		//this means loopback test end

+		return -EINVAL;

+    }

+     spin_lock_irqsave(&mbim_dev->data_lock,flags);

+

+    if(list_empty(&mbim_dev->data_rx_list))

+    {

+        atomic_set(&mbim_dev->data_rx_cnt,0) ;

+        spin_unlock_irqrestore(&mbim_dev->data_lock,flags);

+        printk("[func]:%s,[line]:%d ,data rx list is empty \n",__func__,__LINE__) ;

+        return -EAGAIN ;

+    }

+    struct mbim_pool_data_s * data_chan =  list_first_entry(&mbim_dev->data_rx_list, struct mbim_pool_data_s, list) ;

+    list_del_init(&data_chan->list) ;

+    atomic_dec(&mbim_dev->data_rx_cnt);

+    spin_unlock_irqrestore(&mbim_dev->data_lock,flags);

+    struct sk_buff	*skb = (struct sk_buff	*)(data_chan->pdata) ;

+	if(skb == NULL){
+		printk("mbim_data_read, skb is null\r\n");
+		goto enqueue_idle;
+	}

+    skb_pull(skb, ETH_HLEN);

+	

+	//printk("mbim_data_read, ready read cout = %d ,skb->len:%d \r\n",count , skb->len) ;

+	if(count < skb->len)

+	{

+		printk("\n#####[%s]: notice !!!!!!, ready read cout = %d ,skb->len:%d \r\n",__func__,count , skb->len) ;

+

+	}

+	//printk("mbim_data_read, buf len:%d, skb len:%d, data len:%d, buf:%x\r\n", count, 

+	//skb->len, skb->data_len, buf);

+

+	 rn_cnt = (skb->len > count) ? count : skb->len;

+	// rn_cnt = skb->data_len >count ? count : skb->data_len;

+	copy_ret = copy_to_user(buf, skb->data ,rn_cnt);

+    if(copy_ret)

+    {

+         spin_lock_irqsave(&mbim_dev->data_lock,flags);

+         list_add(&data_chan->list,&mbim_dev->data_rx_list);

+         atomic_inc(&mbim_dev->data_rx_cnt) ;

+         spin_unlock_irqrestore(&mbim_dev->data_lock,flags);

+         dev_kfree_skb_any(skb);

+         printk("[func]:%s,[line]:%d ,copy_to_user failed,ret:%ld \n",__func__,__LINE__,copy_ret ) ;

+         return -EFAULT ;

+    }

+

+    dev_kfree_skb_any(skb);

+enqueue_idle:

+	data_chan->pdata = NULL;

+	spin_lock_irqsave(&mbim_dev->data_lock,flags);

+	list_add_tail(&data_chan->list,&mbim_dev->data_ilde_list);

+	atomic_inc(&mbim_dev->data_idle_cnt) ;

+	spin_unlock_irqrestore(&mbim_dev->data_lock,flags);

+    return rn_cnt;

+  }

+
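+  /*
+   * Loopback write path: wrap the user buffer in a freshly allocated skb and
+   * hand it to mbim_loop_test_xmit().
+   */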

+  static ssize_t mbim_data_write(struct file *fp, const char __user *buf,size_t count, loff_t *pos)

+  {

+    /* check whether the mbim device is valid */

+    int cnt = 0;

+    unsigned long       flags;

+	

+    //printk("\n\n--------[func]:%s,[line]:%d write len:%d \n\n",__func__,__LINE__, count) ;

+    struct f_mbim* mbim_dev = (struct f_mbim *)fp->private_data;

+

+    if(mbim_dev == NULL)
+    {
+         printk("[func]:%s,[line]:%d no dev \n",__func__,__LINE__) ;
+         return -ENODEV ;
+    }
+
+	struct gether *link = &mbim_dev->port;
+ 	struct eth_dev *dev = link->ioport;

+	

+    

+  //  if(atomic_read(&mbim_dev->data_tx_cnt) >=  32 )

+  //  {

+       if (fp->f_flags & O_NONBLOCK)

+       {

+		   printk("[func]:%s,[line]:%d f_flags& O_NONBLOCK return\n",__func__,__LINE__) ;

+           return -EAGAIN ;

+       }

+	   

+       if(wait_event_interruptible(mbim_dev->data_write_wq, atomic_read(&dev->tx_qlen) == 0)){

+           return -ERESTARTSYS ;

+	    }

+	

+    if(atomic_read(&mbim_dev->lb_flag) == 0){

+		//this means loopback test end

+	printk("[func]:%s,[line]:%d lb_flag is 0 return\n",__func__,__LINE__) ;

+		return 0;

+    }

+    spin_lock_irqsave(&dev->lock, flags);

+	

+    if (list_empty(&dev->tx_reqs))

+    {		

+	    spin_unlock_irqrestore(&dev->lock, flags);

+		printk("[func]:%s,[line]:%d tx_reqs is empty return\n",__func__,__LINE__) ;

+		return -ENOMEM;

+	}

+    spin_unlock_irqrestore(&dev->lock, flags);

+    size_t size = sizeof(struct ethhdr) + mbim_dev->port.ioport->net->mtu + 2 + MBIM_IP_MTU_EXTRA ;

+    struct sk_buff *skb = dev_alloc_skb(size);

+	if (skb == NULL) {

+		printk( "%s :no tx skb\n",__func__);

+		return -ENOMEM;

+	}

+    skb_reserve(skb, 2);

+    skb_pull(skb,sizeof(struct ethhdr));

+    skb_put(skb,count);

+    if(copy_from_user(skb->data,buf,count))

+    {

+        dev_kfree_skb_any(skb);

+        printk( "%s :copy_fom_user failed \n",__func__);

+        return -EFAULT ;

+    }

+	

+    

+	//printk( "%s :now mbim_loop_test_xmit, skb len:%d\r\n",__func__, skb->len);

+    if(mbim_loop_test_xmit(mbim_dev->port.ioport,skb)) 

+    {

+        printk( "%s :send skb failed \n",__func__);

+        return -EFAULT ;

+    }

+    

+    return count ;

+  }

+  

+

+

+   /* file operations for mbim control device /dev/android_mbim_ctrl */

+ static const struct file_operations mbim_ctrl_fops = {

+     .owner = THIS_MODULE,

+     .read = mbim_ctrl_read,

+     .write = mbim_ctrl_write,

+     .open = mbim_ctrl_open,

+     .release = mbim_ctrl_release,

+     .unlocked_ioctl = mbim_ctrl_ioctl ,

+ };

+ 

+ static struct miscdevice mbim_ctrl_device = {

+     .minor = MISC_DYNAMIC_MINOR,

+     .name = MBIM_CTRL_NAME,

+     .fops = &mbim_ctrl_fops,

+ };

+

+ 

+  /* file operations for mbim data  device /dev/android_mbim_data */

+  static const struct file_operations mbim_data_fops = {

+      .owner = THIS_MODULE,

+      .read = mbim_data_read,

+      .write = mbim_data_write,

+      .open = mbim_data_open,

+      .release = mbim_data_release,

+  };

+  

+  static struct miscdevice mbim_data_device = {

+      .minor = MISC_DYNAMIC_MINOR,

+      .name = MBIM_DATA_NAME,

+      .fops = &mbim_data_fops,

+  };

+
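+/*
+ * Allocate the control message pool (2 * MBIM_MAX_POOL_NUM entries of
+ * MBIM_MAX_CTRL_MSG bytes each, 8-byte aligned) and the data descriptor
+ * pool, chaining every entry on the corresponding idle list.
+ */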

+static int mbim_conn_pool_init(struct f_mbim * dev)

+{

+

+	unsigned long flags;

+	

+    if(dev ==NULL)

+    {

+        return -1 ;

+    }

+	if(dev->pool_mem){

+		printk("mbim_conn_pool_init, pool_mem already alloced\n");

+		//panic("pool_mem already alloced\n");

+	}

+    /* control channel */

+    void* pbuf = NULL ;

+    

+    int i = 0 ;

+    printk("[func]:%s,[line]:%d  \n",__func__,__LINE__) ;

+    unsigned int element_aligned_max_size = ((sizeof (struct mbim_pool_ctrl_s)+ MBIM_MAX_CTRL_MSG+7)>>3)<<3; /* round up to 8-byte alignment */

+    //element_aligned_max_size = sizeof (struct mbim_pool_ctrl_s)+ MBIM_MAX_CTRL_MSG;

+

+    pbuf =  kzalloc(element_aligned_max_size * MBIM_MAX_POOL_NUM*2, GFP_KERNEL);

+    if(pbuf == NULL)

+    {

+        printk("[func]:%s,[line]:%d ,alloc mbim_pool_ctrl_s ,no mem \n",__func__,__LINE__) ;

+        return -ENOMEM;

+    }

+	spin_lock_irqsave(&dev->ctrl_lock,flags);

+	

+    struct mbim_pool_ctrl_s* pentry = ( struct mbim_pool_ctrl_s* )pbuf;

+    for(i = 0 ;i < MBIM_MAX_POOL_NUM*2 ;i++)

+    {

+        pentry->max_size = MBIM_MAX_CTRL_MSG ;

+        list_add_tail(&pentry->list, &dev->idle_list) ;

+        pentry = (struct mbim_pool_ctrl_s* ) ((u8 *)pentry + element_aligned_max_size);

+        atomic_inc(&dev->idle_cnt);

+    }

+    dev->pool_mem = pbuf ;

+	spin_unlock_irqrestore(&dev->ctrl_lock,flags);

+	printk("mbim_conn_pool_init, idle_cnt:%u\n", atomic_read(&dev->idle_cnt));

+    /* data channel: only rx descriptors are allocated */

+    

+    pbuf =  kzalloc(sizeof(struct mbim_pool_data_s) * MBIM_MAX_POOL_NUM, GFP_KERNEL);

+    if(pbuf == NULL)

+    {

+        printk("[func]:%s,[line]:%d ,alloc mbim_pool_data_s ,no mem \n",__func__,__LINE__) ;

+        INIT_LIST_HEAD(&dev->idle_list );

+        atomic_set(&dev->idle_cnt,0);

+        kfree(dev->pool_mem) ;

+         dev->pool_mem = NULL ;

+        return -ENOMEM;

+    }

+

+	spin_lock_irqsave(&dev->data_lock,flags);	

+    struct mbim_pool_data_s*  data_channel_p =  (struct mbim_pool_data_s*) pbuf ;

+    for(i = 0 ;i < MBIM_MAX_POOL_NUM;i++)

+    {

+        data_channel_p->pdata = NULL;

+        list_add_tail(&data_channel_p->list, &dev->data_ilde_list) ;

+        data_channel_p++ ;

+        atomic_inc(&dev->data_idle_cnt);

+    }

+    dev->data_pool_mem = pbuf ;

+	spin_unlock_irqrestore(&dev->data_lock,flags);

+	   

+    return 0 ;

+}

+

+void mbim_conn_pool_deinit(struct f_mbim * dev)

+{

+	if(dev ==NULL)

+	{

+		return ;

+	}

+	printk("\r\n----------mbim_conn_pool_deinit\r\n");

+	/* free skb_buf */
+	/* ....... */

+	if(dev->data_pool_mem){

+		kfree(dev->data_pool_mem) ;

+		dev->data_pool_mem = NULL;

+	}

+	if(dev->pool_mem){

+		kfree(dev->pool_mem) ;

+		dev->pool_mem = NULL ;

+	}

+	atomic_set(&dev->data_tx_cnt,0) ;

+	atomic_set(&dev->data_rx_cnt,0) ;

+	atomic_set(&dev->data_idle_cnt,0) ;

+

+	atomic_set(&dev->ctrl_tx_cnt,0) ;

+	atomic_set(&dev->ctrl_rx_cnt,0) ;

+	atomic_set(&dev->idle_cnt,0) ;

+    INIT_LIST_HEAD(&dev->idle_list );

+    INIT_LIST_HEAD(&dev->ctrl_tx_list );

+    INIT_LIST_HEAD(&dev->ctrl_rx_list );

+	INIT_LIST_HEAD(&dev->data_ilde_list);

+	INIT_LIST_HEAD(&dev->data_rx_list);

+}

+

+ static void mbim_conn_channel_cleanup(void)

+ {

+     /* deinitialize the memory pools */

+     

+     misc_deregister(&mbim_ctrl_device);

+     misc_deregister(&mbim_data_device);

+     

+     g_mbim = NULL;

+ }

+
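+/*
+ * Allocate the global f_mbim state, initialise its lists, locks and wait
+ * queues, and register the MBIM control and data misc devices.
+ */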

+int mbim_conn_chanel_init(void)

+ {

+     int ret;

+     

+     struct f_mbim* mbim = kzalloc(sizeof *mbim, GFP_KERNEL);

+	if (!mbim)

+	{

+        printk("[func]:%s,[line]:%d ,alloc struct mbim ,no mem \n",__func__,__LINE__) ;

+        return -ENOMEM;

+    }

+

+

+    /* control channel shared with the application layer */

+    atomic_set(&mbim->ctrl_rx_cnt,0) ;

+    atomic_set(&mbim->ctrl_tx_cnt,0) ;

+    atomic_set(&mbim->idle_cnt ,0);

+    spin_lock_init(&mbim->ctrl_lock) ;

+    

+    INIT_LIST_HEAD(&mbim->ctrl_rx_list)  ;

+    INIT_LIST_HEAD(&mbim->ctrl_tx_list)  ;

+    //INIT_LIST_HEAD(&mbim->ctrl_idle_list );

+

+    init_waitqueue_head(&mbim->ctrl_read_wq) ;

+    init_waitqueue_head(&mbim->ctrl_write_wq) ;

+    

+    /* data channel shared with the application layer */

+    atomic_set(&mbim->data_rx_cnt,0) ;

+    atomic_set(&mbim->data_tx_cnt,0) ;

+	atomic_set(&mbim->data_idle_cnt,0) ;

+    spin_lock_init(&mbim->data_lock) ;

+    INIT_LIST_HEAD(&mbim->data_rx_list) ; 

+    INIT_LIST_HEAD(&mbim->data_ilde_list);

+

+    init_waitqueue_head(&mbim->data_read_wq) ;

+    init_waitqueue_head(&mbim->data_write_wq) ;

+    spin_lock_init(&mbim->conn_lock );

+    INIT_LIST_HEAD(&mbim->idle_list );

+	init_waitqueue_head(&mbim->lp_wait);

+    atomic_set(&mbim->lb_flag,0);

+

+    atomic_set(&mbim->netlink_path_status ,0);

+    atomic_set(&mbim->dss_session_id ,0);

+    atomic_set(&mbim->session_id ,0);

+	mbim->trans_flag = 0;

+		

+    /* initialize the memory pools */

+#if 0

+    ret = mbim_conn_pool_init(mbim) ;

+    if(ret)

+    {

+        printk("[func]:%s,[line]:%d ,init pool failed\n",__func__,__LINE__) ;

+        kfree(mbim);

+        return -ENOMEM ;

+    }

+#endif

+     g_mbim = mbim;

+     ret = misc_register(&mbim_ctrl_device);

+     if (ret)

+     {

+         printk("[func]:%s,[line]:%d ,mbim_ctrl_device failed,return %d\n",__func__,__LINE__ ,ret) ;

+         goto err;

+     }

+

+     ret = misc_register(&mbim_data_device);

+     if(ret)

+     {

+         printk("[func]:%s,[line]:%d ,mbim_ctrl_device failed,return %d\n",__func__,__LINE__ ,ret) ;

+         goto dereg_err;

+     }

+     return 0;

+

+ dereg_err:

+    misc_deregister(&mbim_ctrl_device);

+err:

+    return ret;

+ }

+

+

+

+

+

diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_midi.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_midi.c
new file mode 100644
index 0000000..1bf9596
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_midi.c
@@ -0,0 +1,991 @@
+/*
+ * f_midi.c -- USB MIDI class function driver
+ *
+ * Copyright (C) 2006 Thumtronics Pty Ltd.
+ * Developed for Thumtronics by Grey Innovation
+ * Ben Williamson <ben.williamson@greyinnovation.com>
+ *
+ * Rewritten for the composite framework
+ *   Copyright (C) 2011 Daniel Mack <zonque@gmail.com>
+ *
+ * Based on drivers/usb/gadget/f_audio.c,
+ *   Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ *   Copyright (C) 2008 Analog Devices, Inc
+ *
+ * and drivers/usb/gadget/midi.c,
+ *   Copyright (C) 2006 Thumtronics Pty Ltd.
+ *   Ben Williamson <ben.williamson@greyinnovation.com>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/rawmidi.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/midi.h>
+
+MODULE_AUTHOR("Ben Williamson");
+MODULE_LICENSE("GPL v2");
+
+static const char f_midi_shortname[] = "f_midi";
+static const char f_midi_longname[] = "MIDI Gadget";
+
+/*
+ * We can only handle 16 cables on one single endpoint, as cable numbers are
+ * stored in 4-bit fields. And as the interface currently only holds one
+ * single endpoint, this is the maximum number of ports we can allow.
+ */
+#define MAX_PORTS 16
+
+/*
+ * This is a gadget, and the IN/OUT naming is from the host's perspective.
+ * USB -> OUT endpoint -> rawmidi
+ * USB <- IN endpoint  <- rawmidi
+ */
+struct gmidi_in_port {
+	struct f_midi *midi;
+	int active;
+	uint8_t cable;
+	uint8_t state;
+#define STATE_UNKNOWN	0
+#define STATE_1PARAM	1
+#define STATE_2PARAM_1	2
+#define STATE_2PARAM_2	3
+#define STATE_SYSEX_0	4
+#define STATE_SYSEX_1	5
+#define STATE_SYSEX_2	6
+	uint8_t data[2];
+};
+
+struct f_midi {
+	struct usb_function	func;
+	struct usb_gadget	*gadget;
+	struct usb_ep		*in_ep, *out_ep;
+	struct snd_card		*card;
+	struct snd_rawmidi	*rmidi;
+
+	struct snd_rawmidi_substream *in_substream[MAX_PORTS];
+	struct snd_rawmidi_substream *out_substream[MAX_PORTS];
+	struct gmidi_in_port	*in_port[MAX_PORTS];
+
+	unsigned long		out_triggered;
+	struct tasklet_struct	tasklet;
+	unsigned int in_ports;
+	unsigned int out_ports;
+	int index;
+	char *id;
+	unsigned int buflen, qlen;
+};
+
+static inline struct f_midi *func_to_midi(struct usb_function *f)
+{
+	return container_of(f, struct f_midi, func);
+}
+
+static void f_midi_transmit(struct f_midi *midi, struct usb_request *req);
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
+DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
+DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
+
+/* B.3.1  Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc __initdata = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber =	DYNAMIC */
+	/* .bNumEndpoints =	DYNAMIC */
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOCONTROL,
+	/* .iInterface =	DYNAMIC */
+};
+
+/* B.3.2  Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_1 ac_header_desc __initdata = {
+	.bLength =		UAC_DT_AC_HEADER_SIZE(1),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	USB_MS_HEADER,
+	.bcdADC =		cpu_to_le16(0x0100),
+	.wTotalLength =		cpu_to_le16(UAC_DT_AC_HEADER_SIZE(1)),
+	.bInCollection =	1,
+	/* .baInterfaceNr =	DYNAMIC */
+};
+
+/* B.4.1  Standard MS Interface Descriptor */
+static struct usb_interface_descriptor ms_interface_desc __initdata = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber =	DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_MIDISTREAMING,
+	/* .iInterface =	DYNAMIC */
+};
+
+/* B.4.2  Class-Specific MS Interface Descriptor */
+static struct usb_ms_header_descriptor ms_header_desc __initdata = {
+	.bLength =		USB_DT_MS_HEADER_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	USB_MS_HEADER,
+	.bcdMSC =		cpu_to_le16(0x0100),
+	/* .wTotalLength =	DYNAMIC */
+};
+
+/* B.5.1  Standard Bulk OUT Endpoint Descriptor */
+static struct usb_endpoint_descriptor bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+/* B.5.2  Class-specific MS Bulk OUT Endpoint Descriptor */
+static struct usb_ms_endpoint_descriptor_16 ms_out_desc = {
+	/* .bLength =		DYNAMIC */
+	.bDescriptorType =	USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype =	USB_MS_GENERAL,
+	/* .bNumEmbMIDIJack =	DYNAMIC */
+	/* .baAssocJackID =	DYNAMIC */
+};
+
+/* B.6.1  Standard Bulk IN Endpoint Descriptor */
+static struct usb_endpoint_descriptor bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+/* B.6.2  Class-specific MS Bulk IN Endpoint Descriptor */
+static struct usb_ms_endpoint_descriptor_16 ms_in_desc = {
+	/* .bLength =		DYNAMIC */
+	.bDescriptorType =	USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype =	USB_MS_GENERAL,
+	/* .bNumEmbMIDIJack =	DYNAMIC */
+	/* .baAssocJackID =	DYNAMIC */
+};
+
+/* string IDs are assigned dynamically */
+
+#define STRING_FUNC_IDX			0
+
+static struct usb_string midi_string_defs[] = {
+	[STRING_FUNC_IDX].s = "MIDI function",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings midi_stringtab = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= midi_string_defs,
+};
+
+static struct usb_gadget_strings *midi_strings[] = {
+	&midi_stringtab,
+	NULL,
+};
+
+static struct usb_request *alloc_ep_req(struct usb_ep *ep, unsigned length)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+	if (req) {
+		req->length = length;
+		req->buf = kmalloc(length, GFP_ATOMIC);
+		if (!req->buf) {
+			usb_ep_free_request(ep, req);
+			req = NULL;
+		}
+	}
+	return req;
+}
+
+static void free_ep_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
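+/* number of MIDI data bytes for each USB-MIDI Code Index Number (CIN 0x0..0xF) */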
+static const uint8_t f_midi_cin_length[] = {
+	0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1
+};
+
+/*
+ * Receives a chunk of MIDI data.
+ */
+static void f_midi_read_data(struct usb_ep *ep, int cable,
+			     uint8_t *data, int length)
+{
+	struct f_midi *midi = ep->driver_data;
+	struct snd_rawmidi_substream *substream = midi->out_substream[cable];
+
+	if (!substream)
+		/* Nobody is listening - throw it on the floor. */
+		return;
+
+	if (!test_bit(cable, &midi->out_triggered))
+		return;
+
+	snd_rawmidi_receive(substream, data, length);
+}
+
+static void f_midi_handle_out_data(struct usb_ep *ep, struct usb_request *req)
+{
+	unsigned int i;
+	u8 *buf = req->buf;
+
+	for (i = 0; i + 3 < req->actual; i += 4)
+		if (buf[i] != 0) {
+			int cable = buf[i] >> 4;
+			int length = f_midi_cin_length[buf[i] & 0x0f];
+			f_midi_read_data(ep, cable, &buf[i + 1], length);
+		}
+}
+
+static void
+f_midi_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_midi *midi = ep->driver_data;
+	struct usb_composite_dev *cdev = midi->func.config->cdev;
+	int status = req->status;
+
+	switch (status) {
+	case 0:			 /* normal completion */
+		if (ep == midi->out_ep) {
+			/* We received stuff. req is queued again, below */
+			f_midi_handle_out_data(ep, req);
+		} else if (ep == midi->in_ep) {
+			/* Our transmit completed. See if there's more to go.
+			 * f_midi_transmit eats req, don't queue it again. */
+			f_midi_transmit(midi, req);
+			return;
+		}
+		break;
+
+	/* this endpoint is normally active while we're configured */
+	case -ECONNABORTED:	/* hardware forced ep reset */
+	case -ECONNRESET:	/* request dequeued */
+	case -ESHUTDOWN:	/* disconnect from host */
+		VDBG(cdev, "%s gone (%d), %d/%d\n", ep->name, status,
+				req->actual, req->length);
+		if (ep == midi->out_ep)
+			f_midi_handle_out_data(ep, req);
+
+		free_ep_req(ep, req);
+		return;
+
+	case -EOVERFLOW:	/* buffer overrun on read means that
+				 * we didn't provide a big enough buffer.
+				 */
+	default:
+		DBG(cdev, "%s complete --> %d, %d/%d\n", ep->name,
+				status, req->actual, req->length);
+		break;
+	case -EREMOTEIO:	/* short read */
+		break;
+	}
+
+	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (status) {
+		ERROR(cdev, "kill %s:  resubmit %d bytes --> %d\n",
+				ep->name, req->length, status);
+		usb_ep_set_halt(ep);
+		/* FIXME recover later ... somehow */
+	}
+}
+
+static int f_midi_start_ep(struct f_midi *midi,
+			   struct usb_function *f,
+			   struct usb_ep *ep)
+{
+	int err;
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	if (ep->driver_data)
+		usb_ep_disable(ep);
+
+	err = config_ep_by_speed(midi->gadget, f, ep);
+	if (err) {
+		ERROR(cdev, "can't configure %s: %d\n", ep->name, err);
+		return err;
+	}
+
+	err = usb_ep_enable(ep);
+	if (err) {
+		ERROR(cdev, "can't start %s: %d\n", ep->name, err);
+		return err;
+	}
+
+	ep->driver_data = midi;
+
+	return 0;
+}
+
+static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_midi *midi = func_to_midi(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	unsigned i;
+	int err;
+
+	err = f_midi_start_ep(midi, f, midi->in_ep);
+	if (err)
+		return err;
+
+	err = f_midi_start_ep(midi, f, midi->out_ep);
+	if (err)
+		return err;
+
+	if (midi->out_ep->driver_data)
+		usb_ep_disable(midi->out_ep);
+
+	err = config_ep_by_speed(midi->gadget, f, midi->out_ep);
+	if (err) {
+		ERROR(cdev, "can't configure %s: %d\n",
+		      midi->out_ep->name, err);
+		return err;
+	}
+
+	err = usb_ep_enable(midi->out_ep);
+	if (err) {
+		ERROR(cdev, "can't start %s: %d\n",
+		      midi->out_ep->name, err);
+		return err;
+	}
+
+	midi->out_ep->driver_data = midi;
+
+	/* allocate a bunch of read buffers and queue them all at once. */
+	for (i = 0; i < midi->qlen && err == 0; i++) {
+		struct usb_request *req =
+			alloc_ep_req(midi->out_ep, midi->buflen);
+		if (req == NULL)
+			return -ENOMEM;
+
+		req->complete = f_midi_complete;
+		err = usb_ep_queue(midi->out_ep, req, GFP_ATOMIC);
+		if (err) {
+			ERROR(midi, "%s queue req: %d\n",
+				    midi->out_ep->name, err);
+		}
+	}
+
+	return 0;
+}
+
+static void f_midi_disable(struct usb_function *f)
+{
+	struct f_midi *midi = func_to_midi(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	DBG(cdev, "disable\n");
+
+	/*
+	 * just disable endpoints, forcing completion of pending i/o.
+	 * all our completion handlers free their requests in this case.
+	 */
+	usb_ep_disable(midi->in_ep);
+	usb_ep_disable(midi->out_ep);
+}
+
+static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct f_midi *midi = func_to_midi(f);
+	struct snd_card *card;
+
+	DBG(cdev, "unbind\n");
+
+	/* just to be sure */
+	f_midi_disable(f);
+
+	card = midi->card;
+	midi->card = NULL;
+	if (card)
+		snd_card_free(card);
+
+	kfree(midi->id);
+	midi->id = NULL;
+
+	usb_free_descriptors(f->descriptors);
+	usb_free_descriptors(f->hs_descriptors);
+	kfree(midi);
+}
+
+static int f_midi_snd_free(struct snd_device *device)
+{
+	return 0;
+}
+
+static void f_midi_transmit_packet(struct usb_request *req, uint8_t p0,
+					uint8_t p1, uint8_t p2, uint8_t p3)
+{
+	unsigned length = req->length;
+	u8 *buf = (u8 *)req->buf + length;
+
+	buf[0] = p0;
+	buf[1] = p1;
+	buf[2] = p2;
+	buf[3] = p3;
+	req->length = length + 4;
+}
+
+/*
+ * Converts MIDI commands to USB MIDI packets.
+ */
+static void f_midi_transmit_byte(struct usb_request *req,
+				 struct gmidi_in_port *port, uint8_t b)
+{
+	uint8_t p0 = port->cable << 4;
+
+	if (b >= 0xf8) {
+		f_midi_transmit_packet(req, p0 | 0x0f, b, 0, 0);
+	} else if (b >= 0xf0) {
+		switch (b) {
+		case 0xf0:
+			port->data[0] = b;
+			port->state = STATE_SYSEX_1;
+			break;
+		case 0xf1:
+		case 0xf3:
+			port->data[0] = b;
+			port->state = STATE_1PARAM;
+			break;
+		case 0xf2:
+			port->data[0] = b;
+			port->state = STATE_2PARAM_1;
+			break;
+		case 0xf4:
+		case 0xf5:
+			port->state = STATE_UNKNOWN;
+			break;
+		case 0xf6:
+			f_midi_transmit_packet(req, p0 | 0x05, 0xf6, 0, 0);
+			port->state = STATE_UNKNOWN;
+			break;
+		case 0xf7:
+			switch (port->state) {
+			case STATE_SYSEX_0:
+				f_midi_transmit_packet(req,
+					p0 | 0x05, 0xf7, 0, 0);
+				break;
+			case STATE_SYSEX_1:
+				f_midi_transmit_packet(req,
+					p0 | 0x06, port->data[0], 0xf7, 0);
+				break;
+			case STATE_SYSEX_2:
+				f_midi_transmit_packet(req,
+					p0 | 0x07, port->data[0],
+					port->data[1], 0xf7);
+				break;
+			}
+			port->state = STATE_UNKNOWN;
+			break;
+		}
+	} else if (b >= 0x80) {
+		port->data[0] = b;
+		if (b >= 0xc0 && b <= 0xdf)
+			port->state = STATE_1PARAM;
+		else
+			port->state = STATE_2PARAM_1;
+	} else { /* b < 0x80 */
+		switch (port->state) {
+		case STATE_1PARAM:
+			if (port->data[0] < 0xf0) {
+				p0 |= port->data[0] >> 4;
+			} else {
+				p0 |= 0x02;
+				port->state = STATE_UNKNOWN;
+			}
+			f_midi_transmit_packet(req, p0, port->data[0], b, 0);
+			break;
+		case STATE_2PARAM_1:
+			port->data[1] = b;
+			port->state = STATE_2PARAM_2;
+			break;
+		case STATE_2PARAM_2:
+			if (port->data[0] < 0xf0) {
+				p0 |= port->data[0] >> 4;
+				port->state = STATE_2PARAM_1;
+			} else {
+				p0 |= 0x03;
+				port->state = STATE_UNKNOWN;
+			}
+			f_midi_transmit_packet(req,
+				p0, port->data[0], port->data[1], b);
+			break;
+		case STATE_SYSEX_0:
+			port->data[0] = b;
+			port->state = STATE_SYSEX_1;
+			break;
+		case STATE_SYSEX_1:
+			port->data[1] = b;
+			port->state = STATE_SYSEX_2;
+			break;
+		case STATE_SYSEX_2:
+			f_midi_transmit_packet(req,
+				p0 | 0x04, port->data[0], port->data[1], b);
+			port->state = STATE_SYSEX_0;
+			break;
+		}
+	}
+}
+
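+/*
+ * Gather pending bytes from all active rawmidi input ports, encode them into
+ * a single USB request and queue it on the IN endpoint; the request is freed
+ * if there is nothing to send.
+ */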
+static void f_midi_transmit(struct f_midi *midi, struct usb_request *req)
+{
+	struct usb_ep *ep = midi->in_ep;
+	int i;
+
+	if (!ep)
+		return;
+
+	if (!req)
+		req = alloc_ep_req(ep, midi->buflen);
+
+	if (!req) {
+		ERROR(midi, "gmidi_transmit: alloc_ep_request failed\n");
+		return;
+	}
+	req->length = 0;
+	req->complete = f_midi_complete;
+
+	for (i = 0; i < MAX_PORTS; i++) {
+		struct gmidi_in_port *port = midi->in_port[i];
+		struct snd_rawmidi_substream *substream = midi->in_substream[i];
+
+		if (!port || !port->active || !substream)
+			continue;
+
+		while (req->length + 3 < midi->buflen) {
+			uint8_t b;
+			if (snd_rawmidi_transmit(substream, &b, 1) != 1) {
+				port->active = 0;
+				break;
+			}
+			f_midi_transmit_byte(req, port, b);
+		}
+	}
+
+	if (req->length > 0)
+		usb_ep_queue(ep, req, GFP_ATOMIC);
+	else
+		free_ep_req(ep, req);
+}
+
+static void f_midi_in_tasklet(unsigned long data)
+{
+	struct f_midi *midi = (struct f_midi *) data;
+	f_midi_transmit(midi, NULL);
+}
+
+static int f_midi_in_open(struct snd_rawmidi_substream *substream)
+{
+	struct f_midi *midi = substream->rmidi->private_data;
+
+	if (!midi->in_port[substream->number])
+		return -EINVAL;
+
+	VDBG(midi, "%s()\n", __func__);
+	midi->in_substream[substream->number] = substream;
+	midi->in_port[substream->number]->state = STATE_UNKNOWN;
+	return 0;
+}
+
+static int f_midi_in_close(struct snd_rawmidi_substream *substream)
+{
+	struct f_midi *midi = substream->rmidi->private_data;
+
+	VDBG(midi, "%s()\n", __func__);
+	return 0;
+}
+
+static void f_midi_in_trigger(struct snd_rawmidi_substream *substream, int up)
+{
+	struct f_midi *midi = substream->rmidi->private_data;
+
+	if (!midi->in_port[substream->number])
+		return;
+
+	VDBG(midi, "%s() %d\n", __func__, up);
+	midi->in_port[substream->number]->active = up;
+	if (up)
+		tasklet_hi_schedule(&midi->tasklet);
+}
+
+static int f_midi_out_open(struct snd_rawmidi_substream *substream)
+{
+	struct f_midi *midi = substream->rmidi->private_data;
+
+	if (substream->number >= MAX_PORTS)
+		return -EINVAL;
+
+	VDBG(midi, "%s()\n", __func__);
+	midi->out_substream[substream->number] = substream;
+	return 0;
+}
+
+static int f_midi_out_close(struct snd_rawmidi_substream *substream)
+{
+	struct f_midi *midi = substream->rmidi->private_data;
+
+	VDBG(midi, "%s()\n", __func__);
+	return 0;
+}
+
+static void f_midi_out_trigger(struct snd_rawmidi_substream *substream, int up)
+{
+	struct f_midi *midi = substream->rmidi->private_data;
+
+	VDBG(midi, "%s()\n", __func__);
+
+	if (up)
+		set_bit(substream->number, &midi->out_triggered);
+	else
+		clear_bit(substream->number, &midi->out_triggered);
+}
+
+static struct snd_rawmidi_ops gmidi_in_ops = {
+	.open = f_midi_in_open,
+	.close = f_midi_in_close,
+	.trigger = f_midi_in_trigger,
+};
+
+static struct snd_rawmidi_ops gmidi_out_ops = {
+	.open = f_midi_out_open,
+	.close = f_midi_out_close,
+	.trigger = f_midi_out_trigger
+};
+
+/* register as a sound "card" */
+static int f_midi_register_card(struct f_midi *midi)
+{
+	struct snd_card *card;
+	struct snd_rawmidi *rmidi;
+	int err;
+	static struct snd_device_ops ops = {
+		.dev_free = f_midi_snd_free,
+	};
+
+	err = snd_card_create(midi->index, midi->id, THIS_MODULE, 0, &card);
+	if (err < 0) {
+		ERROR(midi, "snd_card_create() failed\n");
+		goto fail;
+	}
+	midi->card = card;
+
+	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, midi, &ops);
+	if (err < 0) {
+		ERROR(midi, "snd_device_new() failed: error %d\n", err);
+		goto fail;
+	}
+
+	strcpy(card->driver, f_midi_longname);
+	strcpy(card->longname, f_midi_longname);
+	strcpy(card->shortname, f_midi_shortname);
+
+	/* Set up rawmidi */
+	snd_component_add(card, "MIDI");
+	err = snd_rawmidi_new(card, card->longname, 0,
+			      midi->out_ports, midi->in_ports, &rmidi);
+	if (err < 0) {
+		ERROR(midi, "snd_rawmidi_new() failed: error %d\n", err);
+		goto fail;
+	}
+	midi->rmidi = rmidi;
+	strcpy(rmidi->name, card->shortname);
+	rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT |
+			    SNDRV_RAWMIDI_INFO_INPUT |
+			    SNDRV_RAWMIDI_INFO_DUPLEX;
+	rmidi->private_data = midi;
+
+	/*
+	 * Yes, rawmidi OUTPUT = USB IN, and rawmidi INPUT = USB OUT.
+	 * It's an upside-down world being a gadget.
+	 */
+	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &gmidi_in_ops);
+	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &gmidi_out_ops);
+
+	snd_card_set_dev(card, &midi->gadget->dev);
+
+	/* register it - we're ready to go */
+	err = snd_card_register(card);
+	if (err < 0) {
+		ERROR(midi, "snd_card_register() failed\n");
+		goto fail;
+	}
+
+	VDBG(midi, "%s() finished ok\n", __func__);
+	return 0;
+
+fail:
+	if (midi->card) {
+		snd_card_free(midi->card);
+		midi->card = NULL;
+	}
+	return err;
+}
+
+/* MIDI function driver setup/binding */
+
+static int __init
+f_midi_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_descriptor_header **midi_function;
+	struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
+	struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS];
+	struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
+	struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc[MAX_PORTS];
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_midi *midi = func_to_midi(f);
+	int status, n, jack = 1, i = 0;
+
+	/* maybe allocate device-global string ID */
+	if (midi_string_defs[0].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			goto fail;
+		midi_string_defs[0].id = status;
+	}
+
+	/* We have two interfaces, AudioControl and MIDIStreaming */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ac_interface_desc.bInterfaceNumber = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ms_interface_desc.bInterfaceNumber = status;
+	ac_header_desc.baInterfaceNr[0] = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	midi->in_ep = usb_ep_autoconfig(cdev->gadget, &bulk_in_desc);
+	if (!midi->in_ep)
+		goto fail;
+	midi->in_ep->driver_data = cdev;	/* claim */
+
+	midi->out_ep = usb_ep_autoconfig(cdev->gadget, &bulk_out_desc);
+	if (!midi->out_ep)
+		goto fail;
+	midi->out_ep->driver_data = cdev;	/* claim */
+
+	/* allocate temporary function list */
+	midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(*midi_function),
+				GFP_KERNEL);
+	if (!midi_function) {
+		status = -ENOMEM;
+		goto fail;
+	}
+
+	/*
+	 * construct the function's descriptor set. As the number of
+	 * input and output MIDI ports is configurable, we have to do
+	 * it that way.
+	 */
+
+	/* add the headers - these are always the same */
+	midi_function[i++] = (struct usb_descriptor_header *) &ac_interface_desc;
+	midi_function[i++] = (struct usb_descriptor_header *) &ac_header_desc;
+	midi_function[i++] = (struct usb_descriptor_header *) &ms_interface_desc;
+
+	/* calculate the header's wTotalLength */
+	n = USB_DT_MS_HEADER_SIZE
+		+ (midi->in_ports + midi->out_ports) *
+			(USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1));
+	ms_header_desc.wTotalLength = cpu_to_le16(n);
+
+	midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
+
+	/* configure the external IN jacks, each linked to an embedded OUT jack */
+	for (n = 0; n < midi->in_ports; n++) {
+		struct usb_midi_in_jack_descriptor *in_ext = &jack_in_ext_desc[n];
+		struct usb_midi_out_jack_descriptor_1 *out_emb = &jack_out_emb_desc[n];
+
+		in_ext->bLength			= USB_DT_MIDI_IN_SIZE;
+		in_ext->bDescriptorType		= USB_DT_CS_INTERFACE;
+		in_ext->bDescriptorSubtype	= USB_MS_MIDI_IN_JACK;
+		in_ext->bJackType		= USB_MS_EXTERNAL;
+		in_ext->bJackID			= jack++;
+		in_ext->iJack			= 0;
+		midi_function[i++] = (struct usb_descriptor_header *) in_ext;
+
+		out_emb->bLength		= USB_DT_MIDI_OUT_SIZE(1);
+		out_emb->bDescriptorType	= USB_DT_CS_INTERFACE;
+		out_emb->bDescriptorSubtype	= USB_MS_MIDI_OUT_JACK;
+		out_emb->bJackType		= USB_MS_EMBEDDED;
+		out_emb->bJackID		= jack++;
+		out_emb->bNrInputPins		= 1;
+		out_emb->pins[0].baSourcePin	= 1;
+		out_emb->pins[0].baSourceID	= in_ext->bJackID;
+		out_emb->iJack			= 0;
+		midi_function[i++] = (struct usb_descriptor_header *) out_emb;
+
+		/* link it to the endpoint */
+		ms_in_desc.baAssocJackID[n] = out_emb->bJackID;
+	}
+
+	/* configure the external OUT jacks, each linked to an embedded IN jack */
+	for (n = 0; n < midi->out_ports; n++) {
+		struct usb_midi_in_jack_descriptor *in_emb = &jack_in_emb_desc[n];
+		struct usb_midi_out_jack_descriptor_1 *out_ext = &jack_out_ext_desc[n];
+
+		in_emb->bLength			= USB_DT_MIDI_IN_SIZE;
+		in_emb->bDescriptorType		= USB_DT_CS_INTERFACE;
+		in_emb->bDescriptorSubtype	= USB_MS_MIDI_IN_JACK;
+		in_emb->bJackType		= USB_MS_EMBEDDED;
+		in_emb->bJackID			= jack++;
+		in_emb->iJack			= 0;
+		midi_function[i++] = (struct usb_descriptor_header *) in_emb;
+
+		out_ext->bLength =		USB_DT_MIDI_OUT_SIZE(1);
+		out_ext->bDescriptorType =	USB_DT_CS_INTERFACE;
+		out_ext->bDescriptorSubtype =	USB_MS_MIDI_OUT_JACK;
+		out_ext->bJackType =		USB_MS_EXTERNAL;
+		out_ext->bJackID =		jack++;
+		out_ext->bNrInputPins =		1;
+		out_ext->iJack =		0;
+		out_ext->pins[0].baSourceID =	in_emb->bJackID;
+		out_ext->pins[0].baSourcePin =	1;
+		midi_function[i++] = (struct usb_descriptor_header *) out_ext;
+
+		/* link it to the endpoint */
+		ms_out_desc.baAssocJackID[n] = in_emb->bJackID;
+	}
+
+	/* configure the endpoint descriptors ... */
+	ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
+	ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
+
+	ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
+	ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
+
+	/* ... and add them to the list */
+	midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc;
+	midi_function[i++] = (struct usb_descriptor_header *) &ms_out_desc;
+	midi_function[i++] = (struct usb_descriptor_header *) &bulk_in_desc;
+	midi_function[i++] = (struct usb_descriptor_header *) &ms_in_desc;
+	midi_function[i++] = NULL;
+
+	/*
+	 * support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	/* copy descriptors, and track endpoint copies */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		c->highspeed = true;
+		bulk_in_desc.wMaxPacketSize = cpu_to_le16(512);
+		bulk_out_desc.wMaxPacketSize = cpu_to_le16(512);
+		f->hs_descriptors = usb_copy_descriptors(midi_function);
+	} else {
+		f->descriptors = usb_copy_descriptors(midi_function);
+	}
+
+	kfree(midi_function);
+
+	return 0;
+
+fail:
+	/* we might as well release our claims on endpoints */
+	if (midi->out_ep)
+		midi->out_ep->driver_data = NULL;
+	if (midi->in_ep)
+		midi->in_ep->driver_data = NULL;
+
+	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+/**
+ * f_midi_bind_config - add USB MIDI function to a configuration
+ * @c: the configuration to support the USB MIDI function
+ * @index: the soundcard index to use for the ALSA device creation
+ * @id: the soundcard id to use for the ALSA device creation
+ * @in_ports: the number of MIDI input ports
+ * @out_ports: the number of MIDI output ports
+ * @buflen: the buffer length to use
+ * @qlen: the number of read requests to pre-allocate
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ */
+int __init f_midi_bind_config(struct usb_configuration *c,
+			      int index, char *id,
+			      unsigned int in_ports,
+			      unsigned int out_ports,
+			      unsigned int buflen,
+			      unsigned int qlen)
+{
+	struct f_midi *midi;
+	int status, i;
+
+	/* sanity check */
+	if (in_ports > MAX_PORTS || out_ports > MAX_PORTS)
+		return -EINVAL;
+
+	/* allocate and initialize one new instance */
+	midi = kzalloc(sizeof *midi, GFP_KERNEL);
+	if (!midi) {
+		status = -ENOMEM;
+		goto fail;
+	}
+
+	for (i = 0; i < in_ports; i++) {
+		struct gmidi_in_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
+		if (!port) {
+			status = -ENOMEM;
+			goto setup_fail;
+		}
+
+		port->midi = midi;
+		port->active = 0;
+		port->cable = i;
+		midi->in_port[i] = port;
+	}
+
+	midi->gadget = c->cdev->gadget;
+	tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi);
+
+	/* set up ALSA midi devices */
+	midi->in_ports = in_ports;
+	midi->out_ports = out_ports;
+	status = f_midi_register_card(midi);
+	if (status < 0)
+		goto setup_fail;
+
+	midi->func.name        = "gmidi function";
+	midi->func.strings     = midi_strings;
+	midi->func.bind        = f_midi_bind;
+	midi->func.unbind      = f_midi_unbind;
+	midi->func.set_alt     = f_midi_set_alt;
+	midi->func.disable     = f_midi_disable;
+
+	midi->id = kstrdup(id, GFP_KERNEL);
+	midi->index = index;
+	midi->buflen = buflen;
+	midi->qlen = qlen;
+
+	status = usb_add_function(c, &midi->func);
+	if (status)
+		goto setup_fail;
+
+	return 0;
+
+setup_fail:
+	for (--i; i >= 0; i--)
+		kfree(midi->in_port[i]);
+	kfree(midi);
+fail:
+	return status;
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_mtp.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_mtp.c
new file mode 100644
index 0000000..1638977
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_mtp.c
@@ -0,0 +1,1283 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_mtp.h>
+
+#define MTP_BULK_BUFFER_SIZE       16384
+#define INTR_BUFFER_SIZE           28
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX	0
+
+/* values for mtp_dev.state */
+#define STATE_OFFLINE               0   /* initial state, disconnected */
+#define STATE_READY                 1   /* ready for userspace calls */
+#define STATE_BUSY                  2   /* processing userspace calls */
+#define STATE_CANCELED              3   /* transaction canceled by host */
+#define STATE_ERROR                 4   /* error from completion routine */
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+#define INTR_REQ_MAX 5
+
+/* ID for Microsoft MTP OS String */
+#define MTP_OS_STRING_ID   0xEE
+
+/* MTP class requests */
+#define MTP_REQ_CANCEL              0x64
+#define MTP_REQ_GET_EXT_EVENT_DATA  0x65
+#define MTP_REQ_RESET               0x66
+#define MTP_REQ_GET_DEVICE_STATUS   0x67
+
+/* constants for device status */
+#define MTP_RESPONSE_OK             0x2001
+#define MTP_RESPONSE_DEVICE_BUSY    0x2019
+
+static const char mtp_shortname[] = "mtp_usb";
+
+struct mtp_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+	struct usb_ep *ep_intr;
+
+	int state;
+
+	/* synchronize access to our device file */
+	atomic_t open_excl;
+	/* to enforce only one ioctl at a time */
+	atomic_t ioctl_excl;
+
+	struct list_head tx_idle;
+	struct list_head intr_idle;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	wait_queue_head_t intr_wq;
+	struct usb_request *rx_req[RX_REQ_MAX];
+	int rx_done;
+
+	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
+	 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
+	 */
+	struct workqueue_struct *wq;
+	struct work_struct send_file_work;
+	struct work_struct receive_file_work;
+	struct file *xfer_file;
+	loff_t xfer_file_offset;
+	int64_t xfer_file_length;
+	unsigned xfer_send_header;
+	uint16_t xfer_command;
+	uint32_t xfer_transaction_id;
+	int xfer_result;
+};
+
+static struct usb_interface_descriptor mtp_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 3,
+	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol     = 0,
+};
+
+static struct usb_interface_descriptor ptp_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 3,
+	.bInterfaceClass        = USB_CLASS_STILL_IMAGE,
+	.bInterfaceSubClass     = 1,
+	.bInterfaceProtocol     = 1,
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_intr_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
+	.bInterval              = 6,
+};
+
+static struct usb_descriptor_header *fs_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *fs_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_string mtp_string_defs[] = {
+	/* Naming interface "MTP" so libmtp will recognize us */
+	[INTERFACE_STRING_INDEX].s	= "MTP",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings mtp_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= mtp_string_defs,
+};
+
+static struct usb_gadget_strings *mtp_strings[] = {
+	&mtp_string_table,
+	NULL,
+};
+
+/* Microsoft MTP OS String */
+static u8 mtp_os_string[] = {
+	18, /* sizeof(mtp_os_string) */
+	USB_DT_STRING,
+	/* Signature field: "MSFT100" */
+	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+	/* vendor code */
+	1,
+	/* padding */
+	0
+};
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mtp_ext_config_desc_header {
+	__le32	dwLength;
+	__u16	bcdVersion;
+	__le16	wIndex;
+	__u8	bCount;
+	__u8	reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mtp_ext_config_desc_function {
+	__u8	bFirstInterfaceNumber;
+	__u8	bInterfaceCount;
+	__u8	compatibleID[8];
+	__u8	subCompatibleID[8];
+	__u8	reserved[6];
+};
+
+/* MTP Extended Configuration Descriptor */
+struct {
+	struct mtp_ext_config_desc_header	header;
+	struct mtp_ext_config_desc_function    function;
+} mtp_ext_config_desc = {
+	.header = {
+		.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
+		.bcdVersion = __constant_cpu_to_le16(0x0100),
+		.wIndex = __constant_cpu_to_le16(4),
+		.bCount = 1,
+	},
+	.function = {
+		.bFirstInterfaceNumber = 0,
+		.bInterfaceCount = 1,
+		.compatibleID = { 'M', 'T', 'P' },
+	},
+};
+
+struct mtp_device_status {
+	__le16	wLength;
+	__le16	wCode;
+};
+
+/* temporary variable used between mtp_open() and mtp_gadget_bind() */
+static struct mtp_dev *_mtp_dev;
+
+static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
+{
+	return container_of(f, struct mtp_dev, function);
+}
+
+static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
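+/* grab exclusive access; returns 0 on success, -1 if already in use */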
+static inline int mtp_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void mtp_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request
+*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = NULL;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (req->status != 0)
+		dev->state = STATE_ERROR;
+
+	mtp_req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	dev->rx_done = 1;
+	if (req->status != 0)
+		dev->state = STATE_ERROR;
+
+	wake_up(&dev->read_wq);
+}
+
+static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (req->status != 0)
+		dev->state = STATE_ERROR;
+
+	mtp_req_put(dev, &dev->intr_idle, req);
+
+	wake_up(&dev->intr_wq);
+}
+
+static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc,
+				struct usb_endpoint_descriptor *intr_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_intr = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_in;
+		mtp_req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_out;
+		dev->rx_req[i] = req;
+	}
+	for (i = 0; i < INTR_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_intr;
+		mtp_req_put(dev, &dev->intr_idle, req);
+	}
+
+	return 0;
+
+fail:
+	printk(KERN_ERR "mtp_bind() could not allocate requests\n");
+	return -ENOMEM;
+}
+
+static ssize_t mtp_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret = 0;
+
+	DBG(cdev, "mtp_read(%zu)\n", count);
+
+	if (count > MTP_BULK_BUFFER_SIZE)
+		return -EINVAL;
+
+	/* we will block until we're online */
+	DBG(cdev, "mtp_read: waiting for online state\n");
+	ret = wait_event_interruptible(dev->read_wq,
+		dev->state != STATE_OFFLINE);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancellation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		return -ECANCELED;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
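+	/*
+	 * mtp_read() always uses rx_req[0]; the remaining RX requests are
+	 * reserved for the double-buffered file transfers in
+	 * receive_file_work().
+	 */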
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = count;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	} else {
+		DBG(cdev, "rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		r = ret;
+		usb_ep_dequeue(dev->ep_out, req);
+		goto done;
+	}
+	if (dev->state == STATE_BUSY) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		DBG(cdev, "rx %p %d\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else
+		r = -EIO;
+
+done:
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		r = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+
+	DBG(cdev, "mtp_read returning %d\n", r);
+	return r;
+}
+
+static ssize_t mtp_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = NULL;
+	int r = count, xfer;
+	int sendZLP = 0;
+	int ret;
+
+	DBG(cdev, "mtp_write(%zu)\n", count);
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancellation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		return -ECANCELED;
+	}
+	if (dev->state == STATE_OFFLINE) {
+		spin_unlock_irq(&dev->lock);
+		return -ENODEV;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+	/* we need to send a zero length packet to signal the end of transfer
+	 * if the transfer size is aligned to a packet boundary.
+	 */
+	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+		sendZLP = 1;
+
+	while (count > 0 || sendZLP) {
+		/* so we exit after sending ZLP */
+		if (count == 0)
+			sendZLP = 0;
+
+		if (dev->state != STATE_BUSY) {
+			DBG(cdev, "mtp_write dev->error\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = NULL;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = mtp_req_get(dev, &dev->tx_idle))
+				|| dev->state != STATE_BUSY));
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > MTP_BULK_BUFFER_SIZE)
+			xfer = MTP_BULK_BUFFER_SIZE;
+		else
+			xfer = count;
+		if (xfer && copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			DBG(cdev, "mtp_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = NULL;
+	}
+
+	if (req)
+		mtp_req_put(dev, &dev->tx_idle, req);
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		r = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+
+	DBG(cdev, "mtp_write returning %d\n", r);
+	return r;
+}
+
+/* read from a local file and write to USB */
+static void send_file_work(struct work_struct *data)
+{
+	struct mtp_dev *dev = container_of(data, struct mtp_dev,
+						send_file_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = NULL;
+	struct mtp_data_header *header;
+	struct file *filp;
+	loff_t offset;
+	int64_t count;
+	int xfer, ret, hdr_size;
+	int r = 0;
+	int sendZLP = 0;
+
+	/* read our parameters */
+	smp_rmb();
+	filp = dev->xfer_file;
+	offset = dev->xfer_file_offset;
+	count = dev->xfer_file_length;
+
+	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+
+	if (dev->xfer_send_header) {
+		hdr_size = sizeof(struct mtp_data_header);
+		count += hdr_size;
+	} else {
+		hdr_size = 0;
+	}
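+	/*
+	 * The MTP data header (length, type, command, transaction id) is
+	 * prepended to the first chunk only; hdr_size is cleared once it has
+	 * been written.
+	 */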
+
+	/* we need to send a zero length packet to signal the end of transfer
+	 * if the transfer size is aligned to a packet boundary.
+	 */
+	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+		sendZLP = 1;
+
+	while (count > 0 || sendZLP) {
+		/* so we exit after sending ZLP */
+		if (count == 0)
+			sendZLP = 0;
+
+		/* get an idle tx request to use */
+		req = NULL;
+		ret = wait_event_interruptible(dev->write_wq,
+			(req = mtp_req_get(dev, &dev->tx_idle))
+			|| dev->state != STATE_BUSY);
+		if (dev->state == STATE_CANCELED) {
+			r = -ECANCELED;
+			break;
+		}
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > MTP_BULK_BUFFER_SIZE)
+			xfer = MTP_BULK_BUFFER_SIZE;
+		else
+			xfer = count;
+
+		if (hdr_size) {
+			/* prepend MTP data header */
+			header = (struct mtp_data_header *)req->buf;
+			header->length = __cpu_to_le32(count);
+			header->type = __cpu_to_le16(2); /* data packet */
+			header->command = __cpu_to_le16(dev->xfer_command);
+			header->transaction_id =
+					__cpu_to_le32(dev->xfer_transaction_id);
+		}
+
+		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
+								&offset);
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+		xfer = ret + hdr_size;
+		hdr_size = 0;
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			DBG(cdev, "send_file_work: xfer error %d\n", ret);
+			dev->state = STATE_ERROR;
+			r = -EIO;
+			break;
+		}
+
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = NULL;
+	}
+
+	if (req)
+		mtp_req_put(dev, &dev->tx_idle, req);
+
+	DBG(cdev, "send_file_work returning %d\n", r);
+	/* write the result */
+	dev->xfer_result = r;
+	smp_wmb();
+}
+
+/* read from USB and write to a local file */
+static void receive_file_work(struct work_struct *data)
+{
+	struct mtp_dev *dev = container_of(data, struct mtp_dev,
+						receive_file_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *read_req = NULL, *write_req = NULL;
+	struct file *filp;
+	loff_t offset;
+	int64_t count;
+	int ret, cur_buf = 0;
+	int r = 0;
+
+	/* read our parameters */
+	smp_rmb();
+	filp = dev->xfer_file;
+	offset = dev->xfer_file_offset;
+	count = dev->xfer_file_length;
+
+	DBG(cdev, "receive_file_work(%lld)\n", count);
+
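+	/*
+	 * Double buffering: queue the next USB OUT request while the
+	 * previously filled buffer is being written out to the file, then
+	 * wait for the pending read to complete.
+	 */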
+	while (count > 0 || write_req) {
+		if (count > 0) {
+			/* queue a request */
+			read_req = dev->rx_req[cur_buf];
+			cur_buf = (cur_buf + 1) % RX_REQ_MAX;
+
+			read_req->length = (count > MTP_BULK_BUFFER_SIZE
+					? MTP_BULK_BUFFER_SIZE : count);
+			dev->rx_done = 0;
+			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
+			if (ret < 0) {
+				r = -EIO;
+				dev->state = STATE_ERROR;
+				break;
+			}
+		}
+
+		if (write_req) {
+			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+			ret = vfs_write(filp, write_req->buf, write_req->actual,
+				&offset);
+			DBG(cdev, "vfs_write %d\n", ret);
+			if (ret != write_req->actual) {
+				r = -EIO;
+				dev->state = STATE_ERROR;
+				break;
+			}
+			write_req = NULL;
+		}
+
+		if (read_req) {
+			/* wait for our last read to complete */
+			ret = wait_event_interruptible(dev->read_wq,
+				dev->rx_done || dev->state != STATE_BUSY);
+			if (dev->state == STATE_CANCELED) {
+				r = -ECANCELED;
+				if (!dev->rx_done)
+					usb_ep_dequeue(dev->ep_out, read_req);
+				break;
+			}
+			/* if xfer_file_length is 0xFFFFFFFF, then we read until
+			 * we get a zero length packet
+			 */
+			if (count != 0xFFFFFFFF)
+				count -= read_req->actual;
+			if (read_req->actual < read_req->length) {
+				/*
+				 * short packet is used to signal EOF for
+				 * sizes > 4 gig
+				 */
+				DBG(cdev, "got short packet\n");
+				count = 0;
+			}
+
+			write_req = read_req;
+			read_req = NULL;
+		}
+	}
+
+	DBG(cdev, "receive_file_work returning %d\n", r);
+	/* write the result */
+	dev->xfer_result = r;
+	smp_wmb();
+}
+
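+/* queue an interrupt IN packet carrying an MTP event supplied by userspace */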
+static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
+{
+	struct usb_request *req = NULL;
+	int ret;
+	int length = event->length;
+
+	DBG(dev->cdev, "mtp_send_event(%d)\n", event->length);
+
+	if (length < 0 || length > INTR_BUFFER_SIZE)
+		return -EINVAL;
+	if (dev->state == STATE_OFFLINE)
+		return -ENODEV;
+
+	ret = wait_event_interruptible_timeout(dev->intr_wq,
+			(req = mtp_req_get(dev, &dev->intr_idle)),
+			msecs_to_jiffies(1000));
+	if (!req)
+		return -ETIME;
+
+	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
+		mtp_req_put(dev, &dev->intr_idle, req);
+		return -EFAULT;
+	}
+	req->length = length;
+	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
+	if (ret)
+		mtp_req_put(dev, &dev->intr_idle, req);
+
+	return ret;
+}
+
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct file *filp = NULL;
+	int ret = -EINVAL;
+
+	if (mtp_lock(&dev->ioctl_excl))
+		return -EBUSY;
+
+	switch (code) {
+	case MTP_SEND_FILE:
+	case MTP_RECEIVE_FILE:
+	case MTP_SEND_FILE_WITH_HEADER:
+	{
+		struct mtp_file_range	mfr;
+		struct work_struct *work;
+
+		spin_lock_irq(&dev->lock);
+		if (dev->state == STATE_CANCELED) {
+			/* report cancellation to userspace */
+			dev->state = STATE_READY;
+			spin_unlock_irq(&dev->lock);
+			ret = -ECANCELED;
+			goto out;
+		}
+		if (dev->state == STATE_OFFLINE) {
+			spin_unlock_irq(&dev->lock);
+			ret = -ENODEV;
+			goto out;
+		}
+		dev->state = STATE_BUSY;
+		spin_unlock_irq(&dev->lock);
+
+		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		/* hold a reference to the file while we are working with it */
+		filp = fget(mfr.fd);
+		if (!filp) {
+			ret = -EBADF;
+			goto fail;
+		}
+
+		/* write the parameters */
+		dev->xfer_file = filp;
+		dev->xfer_file_offset = mfr.offset;
+		dev->xfer_file_length = mfr.length;
+		smp_wmb();
+
+		if (code == MTP_SEND_FILE_WITH_HEADER) {
+			work = &dev->send_file_work;
+			dev->xfer_send_header = 1;
+			dev->xfer_command = mfr.command;
+			dev->xfer_transaction_id = mfr.transaction_id;
+		} else if (code == MTP_SEND_FILE) {
+			work = &dev->send_file_work;
+			dev->xfer_send_header = 0;
+		} else {
+			work = &dev->receive_file_work;
+		}
+
+		/* We do the file transfer on a work queue so it will run
+		 * in kernel context, which is necessary for vfs_read and
+		 * vfs_write to use our buffers in the kernel address space.
+		 */
+		queue_work(dev->wq, work);
+		/* wait for operation to complete */
+		flush_workqueue(dev->wq);
+		fput(filp);
+
+		/* read the result */
+		smp_rmb();
+		ret = dev->xfer_result;
+		break;
+	}
+	case MTP_SEND_EVENT:
+	{
+		struct mtp_event	event;
+		/* return here so we don't change dev->state below,
+		 * which would interfere with bulk transfer state.
+		 */
+		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+			ret = -EFAULT;
+		else
+			ret = mtp_send_event(dev, &event);
+		goto out;
+	}
+	}
+
+fail:
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		ret = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+out:
+	mtp_unlock(&dev->ioctl_excl);
+	DBG(dev->cdev, "ioctl returning %d\n", ret);
+	return ret;
+}
+
+static int mtp_open(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "mtp_open\n");
+	if (mtp_lock(&_mtp_dev->open_excl))
+		return -EBUSY;
+
+	/* clear any error condition */
+	if (_mtp_dev->state != STATE_OFFLINE)
+		_mtp_dev->state = STATE_READY;
+
+	fp->private_data = _mtp_dev;
+	return 0;
+}
+
+static int mtp_release(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "mtp_release\n");
+
+	mtp_unlock(&_mtp_dev->open_excl);
+	return 0;
+}
+
+/* file operations for /dev/mtp_usb */
+static const struct file_operations mtp_fops = {
+	.owner = THIS_MODULE,
+	.read = mtp_read,
+	.write = mtp_write,
+	.unlocked_ioctl = mtp_ioctl,
+	.open = mtp_open,
+	.release = mtp_release,
+};
+
+static struct miscdevice mtp_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = mtp_shortname,
+	.fops = &mtp_fops,
+};
+
+static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct mtp_dev *dev = _mtp_dev;
+	int	value = -EOPNOTSUPP;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+	unsigned long	flags;
+
+	VDBG(cdev, "mtp_ctrlrequest "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
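+	/*
+	 * Requests handled here on ep0: the Microsoft OS string descriptor,
+	 * the vendor request for the extended configuration descriptor, and
+	 * the MTP class requests Cancel and Get Device Status.
+	 */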
+	/* Handle MTP OS string */
+	if (ctrl->bRequestType ==
+			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+			&& (w_value >> 8) == USB_DT_STRING
+			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
+		value = (w_length < sizeof(mtp_os_string)
+				? w_length : sizeof(mtp_os_string));
+		memcpy(cdev->req->buf, mtp_os_string, value);
+	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
+		/* Handle MTP OS descriptor */
+		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
+			ctrl->bRequest, w_index, w_value, w_length);
+
+		if (ctrl->bRequest == 1
+				&& (ctrl->bRequestType & USB_DIR_IN)
+				&& (w_index == 4 || w_index == 5)) {
+			value = (w_length < sizeof(mtp_ext_config_desc) ?
+					w_length : sizeof(mtp_ext_config_desc));
+			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+		}
+	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+			ctrl->bRequest, w_index, w_value, w_length);
+
+		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
+				&& w_value == 0) {
+			DBG(cdev, "MTP_REQ_CANCEL\n");
+
+			spin_lock_irqsave(&dev->lock, flags);
+			if (dev->state == STATE_BUSY) {
+				dev->state = STATE_CANCELED;
+				wake_up(&dev->read_wq);
+				wake_up(&dev->write_wq);
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+
+			/* We need to queue a request to read the remaining
+			 * bytes, but we don't actually need to look at
+			 * the contents.
+			 */
+			value = w_length;
+		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
+				&& w_index == 0 && w_value == 0) {
+			struct mtp_device_status *status = cdev->req->buf;
+			status->wLength =
+				__constant_cpu_to_le16(sizeof(*status));
+
+			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			/* device status is "busy" until we report
+			 * the cancellation to userspace
+			 */
+			if (dev->state == STATE_CANCELED)
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
+			else
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_OK);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			value = sizeof(*status);
+		}
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		int rc;
+		cdev->req->zero = value < w_length;
+		cdev->req->length = value;
+		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (rc < 0)
+			ERROR(cdev, "%s: response queue error\n", __func__);
+	}
+	return value;
+}
+
+static int
+mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct mtp_dev	*dev = func_to_mtp(f);
+	int			id;
+	int			ret;
+
+	dev->cdev = cdev;
+	DBG(cdev, "mtp_function_bind dev: %p\n", dev);
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	mtp_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
+			&mtp_fullspeed_out_desc, &mtp_intr_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		mtp_highspeed_in_desc.bEndpointAddress =
+			mtp_fullspeed_in_desc.bEndpointAddress;
+		mtp_highspeed_out_desc.bEndpointAddress =
+			mtp_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static void
+mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_request *req;
+	int i;
+
+	while ((req = mtp_req_get(dev, &dev->tx_idle)))
+		mtp_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		mtp_request_free(dev->rx_req[i], dev->ep_out);
+	while ((req = mtp_req_get(dev, &dev->intr_idle)))
+		mtp_request_free(req, dev->ep_intr);
+	dev->state = STATE_OFFLINE;
+}
+
+static int mtp_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_out);
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_intr);
+	if (ret) {
+		usb_ep_disable(dev->ep_out);
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+	dev->state = STATE_READY;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void mtp_function_disable(struct usb_function *f)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	DBG(cdev, "mtp_function_disable\n");
+	dev->state = STATE_OFFLINE;
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+	usb_ep_disable(dev->ep_intr);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
+{
+	struct mtp_dev *dev = _mtp_dev;
+	int ret = 0;
+
+	printk(KERN_INFO "mtp_bind_config\n");
+
+	/* allocate a string ID for our interface */
+	if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+		ret = usb_string_id(c->cdev);
+		if (ret < 0)
+			return ret;
+		mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
+		mtp_interface_desc.iInterface = ret;
+	}
+
+	dev->cdev = c->cdev;
+	dev->function.name = "mtp";
+	dev->function.strings = mtp_strings;
+	if (ptp_config) {
+		dev->function.descriptors = fs_ptp_descs;
+		dev->function.hs_descriptors = hs_ptp_descs;
+	} else {
+		dev->function.descriptors = fs_mtp_descs;
+		dev->function.hs_descriptors = hs_mtp_descs;
+	}
+	dev->function.bind = mtp_function_bind;
+	dev->function.unbind = mtp_function_unbind;
+	dev->function.set_alt = mtp_function_set_alt;
+	dev->function.disable = mtp_function_disable;
+
+	return usb_add_function(c, &dev->function);
+}
+
+static int mtp_setup(void)
+{
+	struct mtp_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+	init_waitqueue_head(&dev->intr_wq);
+	atomic_set(&dev->open_excl, 0);
+	atomic_set(&dev->ioctl_excl, 0);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->intr_idle);
+
+	dev->wq = create_singlethread_workqueue("f_mtp");
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+	INIT_WORK(&dev->send_file_work, send_file_work);
+	INIT_WORK(&dev->receive_file_work, receive_file_work);
+
+	_mtp_dev = dev;
+
+	ret = misc_register(&mtp_device);
+	if (ret)
+		goto err2;
+
+	return 0;
+
+err2:
+	destroy_workqueue(dev->wq);
+err1:
+	_mtp_dev = NULL;
+	kfree(dev);
+	printk(KERN_ERR "mtp gadget driver failed to initialize\n");
+	return ret;
+}
+
+static void mtp_cleanup(void)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (!dev)
+		return;
+
+	misc_deregister(&mtp_device);
+	destroy_workqueue(dev->wq);
+	_mtp_dev = NULL;
+	kfree(dev);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_ncm.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_ncm.c
new file mode 100644
index 0000000..d7811ae
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_ncm.c
@@ -0,0 +1,1378 @@
+/*
+ * f_ncm.c -- USB CDC Network (NCM) link function driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Contact: Yauheni Kaliuta <yauheni.kaliuta@nokia.com>
+ *
+ * The driver borrows from f_ecm.c which is:
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+
+#include <linux/usb/cdc.h>
+
+#include "u_ether.h"
+
+/*
+ * This function is a "CDC Network Control Model" (CDC NCM) Ethernet link.
+ * NCM is intended to be used with high-speed network attachments.
+ *
+ * Note that NCM requires the use of "alternate settings" for its data
+ * interface.  This means that the set_alt() method has real work to do,
+ * and also means that a get_alt() method is required.
+ */
+
+/* to trigger crc/non-crc ndp signature */
+
+#define NCM_NDP_HDR_CRC_MASK	0x01000000
+#define NCM_NDP_HDR_CRC		0x01000000
+#define NCM_NDP_HDR_NOCRC	0x00000000
+
+enum ncm_notify_state {
+	NCM_NOTIFY_NONE,		/* don't notify */
+	NCM_NOTIFY_CONNECT,		/* issue CONNECT next */
+	NCM_NOTIFY_SPEED,		/* issue SPEED_CHANGE next */
+};
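+/*
+ * ncm_notify() arms NCM_NOTIFY_SPEED; each completed notification advances
+ * the state, so SPEED_CHANGE is sent first and NETWORK_CONNECTION second.
+ */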
+
+struct f_ncm {
+	struct gether			port;
+	u8				ctrl_id, data_id;
+
+	char				ethaddr[14];
+
+	struct usb_ep			*notify;
+	struct usb_request		*notify_req;
+	u8				notify_state;
+	bool				is_open;
+
+	struct ndp_parser_opts		*parser_opts;
+	bool				is_crc;
+
+	/*
+	 * for notification, it is accessed from both
+	 * callback and ethernet open/close
+	 */
+	spinlock_t			lock;
+};
+
+static inline struct f_ncm *func_to_ncm(struct usb_function *f)
+{
+	return container_of(f, struct f_ncm, port.func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
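+/*
+ * High speed: 13 bulk packets of 512 bytes per microframe, 8 microframes per
+ * millisecond; full speed: 19 bulk packets of 64 bytes per 1 ms frame.  Both
+ * are scaled by 1000 frames per second and 8 bits per byte.
+ */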
+static inline unsigned ncm_bitrate(struct usb_gadget *g)
+{
+	if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;
+	else
+		return 19 *  64 * 1 * 1000 * 8;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * We cannot group frames, so use just the minimal size that can hold
+ * one maximum-size Ethernet frame.
+ * If the host can group frames, allow it to do so; 16K is selected
+ * because it is the default used by the current Linux host driver.
+ */
+#define NTB_DEFAULT_IN_SIZE	USB_CDC_NCM_NTB_MIN_IN_SIZE
+#define NTB_OUT_SIZE		16384
+
+/*
+ * skbs smaller than MAX_TX_NONFIXED are not padded out to NCM's
+ * dwNtbInMaxSize, to save bus bandwidth
+ */
+
+#define	MAX_TX_NONFIXED		(512 * 3)
+
+#define FORMATS_SUPPORTED	(USB_CDC_NCM_NTB16_SUPPORTED |	\
+				 USB_CDC_NCM_NTB32_SUPPORTED)
+
+static struct usb_cdc_ncm_ntb_parameters ntb_parameters = {
+	.wLength = cpu_to_le16(sizeof(ntb_parameters)),
+	.bmNtbFormatsSupported = cpu_to_le16(FORMATS_SUPPORTED),
+	.dwNtbInMaxSize = cpu_to_le32(NTB_DEFAULT_IN_SIZE),
+	.wNdpInDivisor = cpu_to_le16(4),
+	.wNdpInPayloadRemainder = cpu_to_le16(0),
+	.wNdpInAlignment = cpu_to_le16(4),
+
+	.dwNtbOutMaxSize = cpu_to_le32(NTB_OUT_SIZE),
+	.wNdpOutDivisor = cpu_to_le16(4),
+	.wNdpOutPayloadRemainder = cpu_to_le16(0),
+	.wNdpOutAlignment = cpu_to_le16(4),
+};
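+/*
+ * These values are returned verbatim for GET_NTB_PARAMETERS; ncm_wrap_ntb()
+ * uses the IN-direction divisor/remainder/alignment when building NTBs to
+ * send to the host.
+ */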
+
+/*
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ */
+
+#define LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */
+#define NCM_STATUS_BYTECOUNT		16	/* 8 byte header + data */
+
+static struct usb_interface_assoc_descriptor ncm_iad_desc __initdata = {
+	.bLength =		sizeof ncm_iad_desc,
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+
+	/* .bFirstInterface =	DYNAMIC, */
+	.bInterfaceCount =	2,	/* control + data */
+	.bFunctionClass =	USB_CLASS_COMM,
+	.bFunctionSubClass =	USB_CDC_SUBCLASS_NCM,
+	.bFunctionProtocol =	USB_CDC_PROTO_NONE,
+	/* .iFunction =		DYNAMIC */
+};
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor ncm_control_intf __initdata = {
+	.bLength =		sizeof ncm_control_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_NCM,
+	.bInterfaceProtocol =	USB_CDC_PROTO_NONE,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc ncm_header_desc __initdata = {
+	.bLength =		sizeof ncm_header_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc ncm_union_desc __initdata = {
+	.bLength =		sizeof(ncm_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+static struct usb_cdc_ether_desc ecm_desc __initdata = {
+	.bLength =		sizeof ecm_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,
+
+	/* this descriptor actually adds value, surprise! */
+	/* .iMACAddress = DYNAMIC */
+	.bmEthernetStatistics =	cpu_to_le32(0), /* no statistics */
+	.wMaxSegmentSize =	cpu_to_le16(ETH_FRAME_LEN),
+	.wNumberMCFilters =	cpu_to_le16(0),
+	.bNumberPowerFilters =	0,
+};
+
+#define NCAPS	(USB_CDC_NCM_NCAP_ETH_FILTER | USB_CDC_NCM_NCAP_CRC_MODE)
+
+static struct usb_cdc_ncm_desc ncm_desc __initdata = {
+	.bLength =		sizeof ncm_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_NCM_TYPE,
+
+	.bcdNcmVersion =	cpu_to_le16(0x0100),
+	/* can process SetEthernetPacketFilter */
+	.bmNetworkCapabilities = NCAPS,
+};
+
+/* the default data interface has no endpoints ... */
+
+static struct usb_interface_descriptor ncm_data_nop_intf __initdata = {
+	.bLength =		sizeof ncm_data_nop_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	USB_CDC_NCM_PROTO_NTB,
+	/* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor ncm_data_intf __initdata = {
+	.bLength =		sizeof ncm_data_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	USB_CDC_NCM_PROTO_NTB,
+	/* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor fs_ncm_notify_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(NCM_STATUS_BYTECOUNT),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor fs_ncm_in_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_ncm_out_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *ncm_fs_function[] __initdata = {
+	(struct usb_descriptor_header *) &ncm_iad_desc,
+	/* CDC NCM control descriptors */
+	(struct usb_descriptor_header *) &ncm_control_intf,
+	(struct usb_descriptor_header *) &ncm_header_desc,
+	(struct usb_descriptor_header *) &ncm_union_desc,
+	(struct usb_descriptor_header *) &ecm_desc,
+	(struct usb_descriptor_header *) &ncm_desc,
+	(struct usb_descriptor_header *) &fs_ncm_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ncm_data_nop_intf,
+	(struct usb_descriptor_header *) &ncm_data_intf,
+	(struct usb_descriptor_header *) &fs_ncm_in_desc,
+	(struct usb_descriptor_header *) &fs_ncm_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor hs_ncm_notify_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(NCM_STATUS_BYTECOUNT),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor hs_ncm_in_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_ncm_out_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *ncm_hs_function[] __initdata = {
+	(struct usb_descriptor_header *) &ncm_iad_desc,
+	/* CDC NCM control descriptors */
+	(struct usb_descriptor_header *) &ncm_control_intf,
+	(struct usb_descriptor_header *) &ncm_header_desc,
+	(struct usb_descriptor_header *) &ncm_union_desc,
+	(struct usb_descriptor_header *) &ecm_desc,
+	(struct usb_descriptor_header *) &ncm_desc,
+	(struct usb_descriptor_header *) &hs_ncm_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ncm_data_nop_intf,
+	(struct usb_descriptor_header *) &ncm_data_intf,
+	(struct usb_descriptor_header *) &hs_ncm_in_desc,
+	(struct usb_descriptor_header *) &hs_ncm_out_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+#define STRING_CTRL_IDX	0
+#define STRING_MAC_IDX	1
+#define STRING_DATA_IDX	2
+#define STRING_IAD_IDX	3
+
+static struct usb_string ncm_string_defs[] = {
+	[STRING_CTRL_IDX].s = "CDC Network Control Model (NCM)",
+	[STRING_MAC_IDX].s = NULL /* DYNAMIC */,
+	[STRING_DATA_IDX].s = "CDC Network Data",
+	[STRING_IAD_IDX].s = "CDC NCM",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings ncm_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		ncm_string_defs,
+};
+
+static struct usb_gadget_strings *ncm_strings[] = {
+	&ncm_string_table,
+	NULL,
+};
+
+/*
+ * Here are the options for the NCM Datagram Pointer table (NDP) parser.
+ * The spec (ch. 3) defines two formats, NDP16 and NDP32: in NDP16 the
+ * offset and size fields are one 16-bit word wide, in NDP32 they are two
+ * 16-bit words wide, and the signatures differ.  To keep the parser code
+ * common, the differences live in this structure and the pointer is
+ * switched when the format changes.
+ */
+
+struct ndp_parser_opts {
+	u32		nth_sign;
+	u32		ndp_sign;
+	unsigned	nth_size;
+	unsigned	ndp_size;
+	unsigned	ndplen_align;
+	/* sizes in u16 units */
+	unsigned	dgram_item_len; /* index or length */
+	unsigned	block_length;
+	unsigned	fp_index;
+	unsigned	reserved1;
+	unsigned	reserved2;
+	unsigned	next_fp_index;
+};
+
+#define INIT_NDP16_OPTS {					\
+		.nth_sign = USB_CDC_NCM_NTH16_SIGN,		\
+		.ndp_sign = USB_CDC_NCM_NDP16_NOCRC_SIGN,	\
+		.nth_size = sizeof(struct usb_cdc_ncm_nth16),	\
+		.ndp_size = sizeof(struct usb_cdc_ncm_ndp16),	\
+		.ndplen_align = 4,				\
+		.dgram_item_len = 1,				\
+		.block_length = 1,				\
+		.fp_index = 1,					\
+		.reserved1 = 0,					\
+		.reserved2 = 0,					\
+		.next_fp_index = 1,				\
+	}
+
+
+#define INIT_NDP32_OPTS {					\
+		.nth_sign = USB_CDC_NCM_NTH32_SIGN,		\
+		.ndp_sign = USB_CDC_NCM_NDP32_NOCRC_SIGN,	\
+		.nth_size = sizeof(struct usb_cdc_ncm_nth32),	\
+		.ndp_size = sizeof(struct usb_cdc_ncm_ndp32),	\
+		.ndplen_align = 8,				\
+		.dgram_item_len = 2,				\
+		.block_length = 2,				\
+		.fp_index = 2,					\
+		.reserved1 = 1,					\
+		.reserved2 = 2,					\
+		.next_fp_index = 2,				\
+	}
+
+static struct ndp_parser_opts ndp16_opts = INIT_NDP16_OPTS;
+static struct ndp_parser_opts ndp32_opts = INIT_NDP32_OPTS;
+
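+/*
+ * put_ncm()/get_ncm() write/read a 16- or 32-bit little-endian field and
+ * advance the cursor by 'size' 16-bit words, matching ndp_parser_opts.
+ */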
+static inline void put_ncm(__le16 **p, unsigned size, unsigned val)
+{
+	switch (size) {
+	case 1:
+		put_unaligned_le16((u16)val, *p);
+		break;
+	case 2:
+		put_unaligned_le32((u32)val, *p);
+
+		break;
+	default:
+		BUG();
+	}
+
+	*p += size;
+}
+
+static inline unsigned get_ncm(__le16 **p, unsigned size)
+{
+	unsigned tmp;
+
+	switch (size) {
+	case 1:
+		tmp = get_unaligned_le16(*p);
+		break;
+	case 2:
+		tmp = get_unaligned_le32(*p);
+		break;
+	default:
+		BUG();
+	}
+
+	*p += size;
+	return tmp;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void ncm_reset_values(struct f_ncm *ncm)
+{
+	ncm->parser_opts = &ndp16_opts;
+	ncm->is_crc = false;
+	ncm->port.cdc_filter = DEFAULT_FILTER;
+
+	/* doesn't make sense for ncm, fixed size used */
+	ncm->port.header_len = 0;
+
+	ncm->port.fixed_out_len = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
+	ncm->port.fixed_in_len = NTB_DEFAULT_IN_SIZE;
+}
+
+/*
+ * Context: ncm->lock held
+ */
+static void ncm_do_notify(struct f_ncm *ncm)
+{
+	struct usb_request		*req = ncm->notify_req;
+	struct usb_cdc_notification	*event;
+	struct usb_composite_dev	*cdev = ncm->port.func.config->cdev;
+	__le32				*data;
+	int				status;
+
+	/* notification already in flight? */
+	if (!req)
+		return;
+
+	event = req->buf;
+	switch (ncm->notify_state) {
+	case NCM_NOTIFY_NONE:
+		return;
+
+	case NCM_NOTIFY_CONNECT:
+		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+		if (ncm->is_open)
+			event->wValue = cpu_to_le16(1);
+		else
+			event->wValue = cpu_to_le16(0);
+		event->wLength = 0;
+		req->length = sizeof *event;
+
+		DBG(cdev, "notify connect %s\n",
+				ncm->is_open ? "true" : "false");
+		ncm->notify_state = NCM_NOTIFY_NONE;
+		break;
+
+	case NCM_NOTIFY_SPEED:
+		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+		event->wValue = cpu_to_le16(0);
+		event->wLength = cpu_to_le16(8);
+		req->length = NCM_STATUS_BYTECOUNT;
+
+		/* SPEED_CHANGE data is up/down speeds in bits/sec */
+		data = req->buf + sizeof *event;
+		data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget));
+		data[1] = data[0];
+
+		DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget));
+		ncm->notify_state = NCM_NOTIFY_CONNECT;
+		break;
+	}
+	event->bmRequestType = 0xA1;
+	event->wIndex = cpu_to_le16(ncm->ctrl_id);
+
+	ncm->notify_req = NULL;
+	/*
+	 * With double buffering, if there is space in the FIFO the completion
+	 * callback can run right after usb_ep_queue(), so drop the lock
+	 * around the call.
+	 */
+	spin_unlock(&ncm->lock);
+	status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC);
+	spin_lock(&ncm->lock);
+	if (status < 0) {
+		ncm->notify_req = req;
+		DBG(cdev, "notify --> %d\n", status);
+	}
+}
+
+/*
+ * Context: ncm->lock held
+ */
+static void ncm_notify(struct f_ncm *ncm)
+{
+	/*
+	 * NOTE: on most versions of Linux, the host side cdc-ethernet driver
+	 * won't listen for notifications until its netdevice opens.  The
+	 * first notification then sits in the FIFO for a long time, and the
+	 * second one is queued.
+	 *
+	 * If ncm_notify() is called before the second (CONNECT) notification
+	 * is sent, it will reset to send the SPEED notification again (and
+	 * again, and again), but that's not a problem.
+	 */
+	ncm->notify_state = NCM_NOTIFY_SPEED;
+	ncm_do_notify(ncm);
+}
+
+static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_ncm			*ncm = req->context;
+	struct usb_composite_dev	*cdev = ncm->port.func.config->cdev;
+	struct usb_cdc_notification	*event = req->buf;
+
+	spin_lock(&ncm->lock);
+	switch (req->status) {
+	case 0:
+		VDBG(cdev, "Notification %02x sent\n",
+		     event->bNotificationType);
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		ncm->notify_state = NCM_NOTIFY_NONE;
+		break;
+	default:
+		DBG(cdev, "event %02x --> %d\n",
+			event->bNotificationType, req->status);
+		break;
+	}
+	ncm->notify_req = req;
+	ncm_do_notify(ncm);
+	spin_unlock(&ncm->lock);
+}
+
+static void ncm_ep0out_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	/* now for SET_NTB_INPUT_SIZE only */
+	unsigned		in_size;
+	struct usb_function	*f = req->context;
+	struct f_ncm		*ncm = func_to_ncm(f);
+	struct usb_composite_dev *cdev = ep->driver_data;
+
+	req->context = NULL;
+	if (req->status || req->actual != req->length) {
+		DBG(cdev, "Bad control-OUT transfer\n");
+		goto invalid;
+	}
+
+	in_size = get_unaligned_le32(req->buf);
+	if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+	    in_size > le32_to_cpu(ntb_parameters.dwNtbInMaxSize)) {
+		DBG(cdev, "Got wrong INPUT SIZE (%d) from host\n", in_size);
+		goto invalid;
+	}
+
+	ncm->port.fixed_in_len = in_size;
+	VDBG(cdev, "Set NTB INPUT SIZE %d\n", in_size);
+	return;
+
+invalid:
+	usb_ep_set_halt(ep);
+	return;
+}
+
+static int ncm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_ncm		*ncm = func_to_ncm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/*
+	 * composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SET_ETHERNET_PACKET_FILTER:
+		/*
+		 * see 6.2.30: no data, wIndex = interface,
+		 * wValue = packet filter bitmap
+		 */
+		if (w_length != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		DBG(cdev, "packet filter %02x\n", w_value);
+		/*
+		 * REVISIT locking of cdc_filter.  This assumes the UDC
+		 * driver won't have a concurrent packet TX irq running on
+		 * another CPU; or that if it does, this write is atomic...
+		 */
+		ncm->port.cdc_filter = w_value;
+		value = 0;
+		break;
+	/*
+	 * and optionally:
+	 * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
+	 * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
+	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
+	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_STATISTIC:
+	 */
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_GET_NTB_PARAMETERS:
+
+		if (w_length == 0 || w_value != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		value = w_length > sizeof ntb_parameters ?
+			sizeof ntb_parameters : w_length;
+		memcpy(req->buf, &ntb_parameters, value);
+		VDBG(cdev, "Host asked NTB parameters\n");
+		break;
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_GET_NTB_INPUT_SIZE:
+
+		if (w_length < 4 || w_value != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		put_unaligned_le32(ncm->port.fixed_in_len, req->buf);
+		value = 4;
+		VDBG(cdev, "Host asked INPUT SIZE, sending %d\n",
+		     ncm->port.fixed_in_len);
+		break;
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_SET_NTB_INPUT_SIZE:
+	{
+		if (w_length != 4 || w_value != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		req->complete = ncm_ep0out_complete;
+		req->length = w_length;
+		req->context = f;
+
+		value = req->length;
+		break;
+	}
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_GET_NTB_FORMAT:
+	{
+		uint16_t format;
+
+		if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		format = (ncm->parser_opts == &ndp16_opts) ? 0x0000 : 0x0001;
+		put_unaligned_le16(format, req->buf);
+		value = 2;
+		VDBG(cdev, "Host asked NTB FORMAT, sending %d\n", format);
+		break;
+	}
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_SET_NTB_FORMAT:
+	{
+		if (w_length != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		switch (w_value) {
+		case 0x0000:
+			ncm->parser_opts = &ndp16_opts;
+			DBG(cdev, "NCM16 selected\n");
+			break;
+		case 0x0001:
+			ncm->parser_opts = &ndp32_opts;
+			DBG(cdev, "NCM32 selected\n");
+			break;
+		default:
+			goto invalid;
+		}
+		value = 0;
+		break;
+	}
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_GET_CRC_MODE:
+	{
+		uint16_t is_crc;
+
+		if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		is_crc = ncm->is_crc ? 0x0001 : 0x0000;
+		put_unaligned_le16(is_crc, req->buf);
+		value = 2;
+		VDBG(cdev, "Host asked CRC MODE, sending %d\n", is_crc);
+		break;
+	}
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_SET_CRC_MODE:
+	{
+		int ndp_hdr_crc = 0;
+
+		if (w_length != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		switch (w_value) {
+		case 0x0000:
+			ncm->is_crc = false;
+			ndp_hdr_crc = NCM_NDP_HDR_NOCRC;
+			DBG(cdev, "non-CRC mode selected\n");
+			break;
+		case 0x0001:
+			ncm->is_crc = true;
+			ndp_hdr_crc = NCM_NDP_HDR_CRC;
+			DBG(cdev, "CRC mode selected\n");
+			break;
+		default:
+			goto invalid;
+		}
+		ncm->parser_opts->ndp_sign &= ~NCM_NDP_HDR_CRC_MASK;
+		ncm->parser_opts->ndp_sign |= ndp_hdr_crc;
+		value = 0;
+		break;
+	}
+
+	/* and disabled in ncm descriptor: */
+	/* case USB_CDC_GET_NET_ADDRESS: */
+	/* case USB_CDC_SET_NET_ADDRESS: */
+	/* case USB_CDC_GET_MAX_DATAGRAM_SIZE: */
+	/* case USB_CDC_SET_MAX_DATAGRAM_SIZE: */
+
+	default:
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "ncm req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "ncm req %02x.%02x response err %d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+
+static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_ncm		*ncm = func_to_ncm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	/* Control interface has only altsetting 0 */
+	if (intf == ncm->ctrl_id) {
+		if (alt != 0)
+			goto fail;
+
+		if (ncm->notify->driver_data) {
+			DBG(cdev, "reset ncm control %d\n", intf);
+			usb_ep_disable(ncm->notify);
+		}
+
+		if (!(ncm->notify->desc)) {
+			DBG(cdev, "init ncm ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, ncm->notify))
+				goto fail;
+		}
+		usb_ep_enable(ncm->notify);
+		ncm->notify->driver_data = ncm;
+
+	/* Data interface has two altsettings, 0 and 1 */
+	} else if (intf == ncm->data_id) {
+		if (alt > 1)
+			goto fail;
+
+		if (ncm->port.in_ep->driver_data) {
+			DBG(cdev, "reset ncm\n");
+			gether_disconnect(&ncm->port);
+			ncm_reset_values(ncm);
+		}
+
+		/*
+		 * CDC Network only sends data in non-default altsettings.
+		 * Changing altsettings resets filters, statistics, etc.
+		 */
+		if (alt == 1) {
+			struct net_device	*net;
+
+			if (!ncm->port.in_ep->desc ||
+			    !ncm->port.out_ep->desc) {
+				DBG(cdev, "init ncm\n");
+				if (config_ep_by_speed(cdev->gadget, f,
+						       ncm->port.in_ep) ||
+				    config_ep_by_speed(cdev->gadget, f,
+						       ncm->port.out_ep)) {
+					ncm->port.in_ep->desc = NULL;
+					ncm->port.out_ep->desc = NULL;
+					goto fail;
+				}
+			}
+
+			/* TODO */
+			/* Enable zlps by default for NCM conformance;
+			 * override for musb_hdrc (avoids txdma overhead)
+			 */
+			ncm->port.is_zlp_ok = !(
+				gadget_is_musbhdrc(cdev->gadget)
+				);
+			ncm->port.cdc_filter = DEFAULT_FILTER;
+			DBG(cdev, "activate ncm\n");
+			net = gether_connect(&ncm->port);
+			if (IS_ERR(net))
+				return PTR_ERR(net);
+		}
+
+		spin_lock(&ncm->lock);
+		ncm_notify(ncm);
+		spin_unlock(&ncm->lock);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+/*
+ * Because the data interface supports multiple altsettings,
+ * this NCM function *MUST* implement a get_alt() method.
+ */
+static int ncm_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct f_ncm		*ncm = func_to_ncm(f);
+
+	if (intf == ncm->ctrl_id)
+		return 0;
+	return ncm->port.in_ep->driver_data ? 1 : 0;
+}
+
+static struct sk_buff *ncm_wrap_ntb(struct gether *port,
+				    struct sk_buff *skb)
+{
+	struct f_ncm	*ncm = func_to_ncm(&port->func);
+	struct sk_buff	*skb2;
+	int		ncb_len = 0;
+	__le16		*tmp;
+	int		div = ntb_parameters.wNdpInDivisor;
+	int		rem = ntb_parameters.wNdpInPayloadRemainder;
+	int		pad;
+	int		ndp_align = ntb_parameters.wNdpInAlignment;
+	int		ndp_pad;
+	unsigned	max_size = ncm->port.fixed_in_len;
+	struct ndp_parser_opts *opts = ncm->parser_opts;
+	unsigned	crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
+
+	ncb_len += opts->nth_size;
+	ndp_pad = ALIGN(ncb_len, ndp_align) - ncb_len;
+	ncb_len += ndp_pad;
+	ncb_len += opts->ndp_size;
+	ncb_len += 2 * 2 * opts->dgram_item_len; /* Datagram entry */
+	ncb_len += 2 * 2 * opts->dgram_item_len; /* Zero datagram entry */
+	pad = ALIGN(ncb_len, div) + rem - ncb_len;
+	ncb_len += pad;
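+	/*
+	 * Resulting NTB layout: NTH header, alignment padding, one NDP with a
+	 * single datagram entry plus the terminating zero entry, divisor/
+	 * remainder padding, then the (optionally CRC-suffixed) frame.
+	 */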
+
+	if (ncb_len + skb->len + crc_len > max_size) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+
+	skb2 = skb_copy_expand(skb, ncb_len,
+			       max_size - skb->len - ncb_len - crc_len,
+			       GFP_ATOMIC);
+	dev_kfree_skb_any(skb);
+	if (!skb2)
+		return NULL;
+
+	skb = skb2;
+
+	tmp = (void *) skb_push(skb, ncb_len);
+	memset(tmp, 0, ncb_len);
+
+	put_unaligned_le32(opts->nth_sign, tmp); /* dwSignature */
+	tmp += 2;
+	/* wHeaderLength */
+	put_unaligned_le16(opts->nth_size, tmp++);
+	tmp++; /* skip wSequence */
+	put_ncm(&tmp, opts->block_length, skb->len); /* (d)wBlockLength */
+	/* (d)wFpIndex */
+	/* the first pointer is right after the NTH + align */
+	put_ncm(&tmp, opts->fp_index, opts->nth_size + ndp_pad);
+
+	tmp = (void *)tmp + ndp_pad;
+
+	/* NDP */
+	put_unaligned_le32(opts->ndp_sign, tmp); /* dwSignature */
+	tmp += 2;
+	/* wLength */
+	put_unaligned_le16(ncb_len - opts->nth_size - pad, tmp++);
+
+	tmp += opts->reserved1;
+	tmp += opts->next_fp_index; /* skip reserved (d)wNextFpIndex */
+	tmp += opts->reserved2;
+
+	if (ncm->is_crc) {
+		uint32_t crc;
+
+		crc = ~crc32_le(~0,
+				skb->data + ncb_len,
+				skb->len - ncb_len);
+		put_unaligned_le32(crc, skb->data + skb->len);
+		skb_put(skb, crc_len);
+	}
+
+	/* (d)wDatagramIndex[0] */
+	put_ncm(&tmp, opts->dgram_item_len, ncb_len);
+	/* (d)wDatagramLength[0] */
+	put_ncm(&tmp, opts->dgram_item_len, skb->len - ncb_len);
+	/* (d)wDatagramIndex[1] and  (d)wDatagramLength[1] already zeroed */
+
+	if (skb->len > MAX_TX_NONFIXED)
+		memset(skb_put(skb, max_size - skb->len),
+		       0, max_size - skb->len);
+
+	return skb;
+}
+
+static int ncm_unwrap_ntb(struct gether *port,
+			  struct sk_buff *skb,
+			  struct sk_buff_head *list)
+{
+	struct f_ncm	*ncm = func_to_ncm(&port->func);
+	__le16		*tmp = (void *) skb->data;
+	unsigned	index, index2;
+	unsigned	dg_len, dg_len2;
+	unsigned	ndp_len;
+	struct sk_buff	*skb2;
+	int		ret = -EINVAL;
+	unsigned	max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
+	struct ndp_parser_opts *opts = ncm->parser_opts;
+	unsigned	crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
+	int		dgram_counter;
+
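+	/*
+	 * Walk the NTH header, locate the NDP via (d)wFpIndex, then iterate
+	 * the datagram index/length pairs until the terminating zero entry,
+	 * queueing one skb per datagram.
+	 */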
+	/* dwSignature */
+	if (get_unaligned_le32(tmp) != opts->nth_sign) {
+		INFO(port->func.config->cdev, "Wrong NTH SIGN, skblen %d\n",
+			skb->len);
+		print_hex_dump(KERN_INFO, "HEAD:", DUMP_PREFIX_ADDRESS, 32, 1,
+			       skb->data, 32, false);
+
+		goto err;
+	}
+	tmp += 2;
+	/* wHeaderLength */
+	if (get_unaligned_le16(tmp++) != opts->nth_size) {
+		INFO(port->func.config->cdev, "Wrong NTB headersize\n");
+		goto err;
+	}
+	tmp++; /* skip wSequence */
+
+	/* (d)wBlockLength */
+	if (get_ncm(&tmp, opts->block_length) > max_size) {
+		INFO(port->func.config->cdev, "OUT size exceeded\n");
+		goto err;
+	}
+
+	index = get_ncm(&tmp, opts->fp_index);
+	/* NCM 3.2 */
+	if (((index % 4) != 0) && (index < opts->nth_size)) {
+		INFO(port->func.config->cdev, "Bad index: %x\n",
+			index);
+		goto err;
+	}
+
+	/* walk through NDP */
+	tmp = ((void *)skb->data) + index;
+	if (get_unaligned_le32(tmp) != opts->ndp_sign) {
+		INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
+		goto err;
+	}
+	tmp += 2;
+
+	ndp_len = get_unaligned_le16(tmp++);
+	/*
+	 * NCM 3.3.1
+	 * entry is 2 items
+	 * item size is 16/32 bits, opts->dgram_item_len * 2 bytes
+	 * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry
+	 */
+	if ((ndp_len < opts->ndp_size + 2 * 2 * (opts->dgram_item_len * 2))
+	    || (ndp_len % opts->ndplen_align != 0)) {
+		INFO(port->func.config->cdev, "Bad NDP length: %x\n", ndp_len);
+		goto err;
+	}
+	tmp += opts->reserved1;
+	tmp += opts->next_fp_index; /* skip reserved (d)wNextFpIndex */
+	tmp += opts->reserved2;
+
+	ndp_len -= opts->ndp_size;
+	index2 = get_ncm(&tmp, opts->dgram_item_len);
+	dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
+	dgram_counter = 0;
+
+	do {
+		index = index2;
+		dg_len = dg_len2;
+		if (dg_len < 14 + crc_len) { /* ethernet header + crc */
+			INFO(port->func.config->cdev, "Bad dgram length: %x\n",
+			     dg_len);
+			goto err;
+		}
+		if (ncm->is_crc) {
+			uint32_t crc, crc2;
+
+			crc = get_unaligned_le32(skb->data +
+						 index + dg_len - crc_len);
+			crc2 = ~crc32_le(~0,
+					 skb->data + index,
+					 dg_len - crc_len);
+			if (crc != crc2) {
+				INFO(port->func.config->cdev, "Bad CRC\n");
+				goto err;
+			}
+		}
+
+		index2 = get_ncm(&tmp, opts->dgram_item_len);
+		dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
+
+		if (index2 == 0 || dg_len2 == 0) {
+			skb2 = skb;
+		} else {
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+			if (skb2 == NULL)
+				goto err;
+		}
+
+		if (!skb_pull(skb2, index)) {
+			ret = -EOVERFLOW;
+			goto err;
+		}
+
+		skb_trim(skb2, dg_len - crc_len);
+		skb_queue_tail(list, skb2);
+
+		ndp_len -= 2 * (opts->dgram_item_len * 2);
+
+		dgram_counter++;
+
+		if (index2 == 0 || dg_len2 == 0)
+			break;
+	} while (ndp_len > 2 * (opts->dgram_item_len * 2)); /* zero entry */
+
+	VDBG(port->func.config->cdev,
+	     "Parsed NTB with %d frames\n", dgram_counter);
+	return 0;
+err:
+	skb_queue_purge(list);
+	dev_kfree_skb_any(skb);
+	return ret;
+}
+
+static void ncm_disable(struct usb_function *f)
+{
+	struct f_ncm		*ncm = func_to_ncm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	DBG(cdev, "ncm deactivated\n");
+
+	if (ncm->port.in_ep->driver_data)
+		gether_disconnect(&ncm->port);
+
+	if (ncm->notify->driver_data) {
+		usb_ep_disable(ncm->notify);
+		ncm->notify->driver_data = NULL;
+		ncm->notify->desc = NULL;
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Callbacks let us notify the host about connect/disconnect when the
+ * net device is opened or closed.
+ *
+ * For testing, note that link states on this side include both opened
+ * and closed variants of:
+ *
+ *   - disconnected/unconfigured
+ *   - configured but inactive (data alt 0)
+ *   - configured and active (data alt 1)
+ *
+ * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
+ * SET_INTERFACE (altsetting).  Remember also that "configured" doesn't
+ * imply the host is actually polling the notification endpoint, and
+ * likewise that "active" doesn't imply it's actually using the data
+ * endpoints for traffic.
+ */
+
+static void ncm_open(struct gether *geth)
+{
+	struct f_ncm		*ncm = func_to_ncm(&geth->func);
+
+	DBG(ncm->port.func.config->cdev, "%s\n", __func__);
+
+	spin_lock(&ncm->lock);
+	ncm->is_open = true;
+	ncm_notify(ncm);
+	spin_unlock(&ncm->lock);
+}
+
+static void ncm_close(struct gether *geth)
+{
+	struct f_ncm		*ncm = func_to_ncm(&geth->func);
+
+	DBG(ncm->port.func.config->cdev, "%s\n", __func__);
+
+	spin_lock(&ncm->lock);
+	ncm->is_open = false;
+	ncm_notify(ncm);
+	spin_unlock(&ncm->lock);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int __init
+ncm_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_ncm		*ncm = func_to_ncm(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ncm->ctrl_id = status;
+	ncm_iad_desc.bFirstInterface = status;
+
+	ncm_control_intf.bInterfaceNumber = status;
+	ncm_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ncm->data_id = status;
+
+	ncm_data_nop_intf.bInterfaceNumber = status;
+	ncm_data_intf.bInterfaceNumber = status;
+	ncm_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_in_desc);
+	if (!ep)
+		goto fail;
+	ncm->port.in_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc);
+	if (!ep)
+		goto fail;
+	ncm->port.out_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc);
+	if (!ep)
+		goto fail;
+	ncm->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!ncm->notify_req)
+		goto fail;
+	ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!ncm->notify_req->buf)
+		goto fail;
+	ncm->notify_req->context = ncm;
+	ncm->notify_req->complete = ncm_notify_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(ncm_fs_function);
+	if (!f->descriptors)
+		goto fail;
+
+	/*
+	 * support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hs_ncm_in_desc.bEndpointAddress =
+				fs_ncm_in_desc.bEndpointAddress;
+		hs_ncm_out_desc.bEndpointAddress =
+				fs_ncm_out_desc.bEndpointAddress;
+		hs_ncm_notify_desc.bEndpointAddress =
+				fs_ncm_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(ncm_hs_function);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	/*
+	 * NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	ncm->port.open = ncm_open;
+	ncm->port.close = ncm_close;
+
+	DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			ncm->port.in_ep->name, ncm->port.out_ep->name,
+			ncm->notify->name);
+	return 0;
+
+fail:
+	if (f->descriptors)
+		usb_free_descriptors(f->descriptors);
+
+	if (ncm->notify_req) {
+		kfree(ncm->notify_req->buf);
+		usb_ep_free_request(ncm->notify, ncm->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints */
+	if (ncm->notify)
+		ncm->notify->driver_data = NULL;
+	if (ncm->port.out_ep)
+		ncm->port.out_ep->driver_data = NULL;
+	if (ncm->port.in_ep)
+		ncm->port.in_ep->driver_data = NULL;
+
+	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+static void
+ncm_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_ncm		*ncm = func_to_ncm(f);
+
+	DBG(c->cdev, "ncm unbind\n");
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+
+	kfree(ncm->notify_req->buf);
+	usb_ep_free_request(ncm->notify, ncm->notify_req);
+
+	ncm_string_defs[1].s = NULL;
+	kfree(ncm);
+}
+
+/**
+ * ncm_bind_config - add CDC Network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ *	of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_setup().  Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
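+ *
+ * A minimal usage sketch (illustrative only; it mirrors how the standard
+ * ncm.c gadget driver uses this API, error handling omitted):
+ *
+ *	static u8 hostaddr[ETH_ALEN];
+ *
+ *	gether_setup(cdev->gadget, hostaddr);	-- from the gadget bind()
+ *	ncm_bind_config(c, hostaddr);		-- from the config bind()
+ *	gether_cleanup();			-- before module unload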
+ */
+int __init ncm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+{
+	struct f_ncm	*ncm;
+	int		status;
+
+	if (!can_support_ecm(c->cdev->gadget) || !ethaddr)
+		return -EINVAL;
+
+	/* maybe allocate device-global string IDs */
+	if (ncm_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ncm_string_defs[STRING_CTRL_IDX].id = status;
+		ncm_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ncm_string_defs[STRING_DATA_IDX].id = status;
+		ncm_data_nop_intf.iInterface = status;
+		ncm_data_intf.iInterface = status;
+
+		/* MAC address */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ncm_string_defs[STRING_MAC_IDX].id = status;
+		ecm_desc.iMACAddress = status;
+
+		/* IAD */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ncm_string_defs[STRING_IAD_IDX].id = status;
+		ncm_iad_desc.iFunction = status;
+	}
+
+	/* allocate and initialize one new instance */
+	ncm = kzalloc(sizeof *ncm, GFP_KERNEL);
+	if (!ncm)
+		return -ENOMEM;
+
+	/* export host's Ethernet address in CDC format */
+	snprintf(ncm->ethaddr, sizeof ncm->ethaddr,
+		"%02X%02X%02X%02X%02X%02X",
+		ethaddr[0], ethaddr[1], ethaddr[2],
+		ethaddr[3], ethaddr[4], ethaddr[5]);
+	ncm_string_defs[1].s = ncm->ethaddr;
+
+	spin_lock_init(&ncm->lock);
+	ncm_reset_values(ncm);
+	ncm->port.is_fixed = true;
+
+	ncm->port.func.name = "cdc_network";
+	ncm->port.func.strings = ncm_strings;
+	/* descriptors are per-instance copies */
+	ncm->port.func.bind = ncm_bind;
+	ncm->port.func.unbind = ncm_unbind;
+	ncm->port.func.set_alt = ncm_set_alt;
+	ncm->port.func.get_alt = ncm_get_alt;
+	ncm->port.func.setup = ncm_setup;
+	ncm->port.func.disable = ncm_disable;
+
+	ncm->port.wrap = ncm_wrap_ntb;
+	ncm->port.unwrap = ncm_unwrap_ntb;
+
+	status = usb_add_function(c, &ncm->port.func);
+	if (status) {
+		ncm_string_defs[1].s = NULL;
+		kfree(ncm);
+	}
+	return status;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_obex.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_obex.c
new file mode 100644
index 0000000..41b1564
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_obex.c
@@ -0,0 +1,471 @@
+/*
+ * f_obex.c -- USB CDC OBEX function driver
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * Based on f_acm.c by Al Borchers and David Brownell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include "u_serial.h"
+#include "gadget_chips.h"
+
+
+/*
+ * This CDC OBEX function driver just packages a TTY-ish byte stream.
+ * A user mode server will put it into "raw" mode and handle all the
+ * relevant protocol details ... this is just a kernel passthrough.
+ * When possible, we prevent gadget enumeration until that server is
+ * ready to handle the commands.
+ */
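+
+/*
+ * For illustration only (not part of this driver): a userspace OBEX
+ * server would typically open the matching /dev/ttyGS* node and switch
+ * it to raw mode before speaking OBEX over it, e.g.:
+ *
+ *	#include <fcntl.h>
+ *	#include <termios.h>
+ *
+ *	int fd = open("/dev/ttyGS0", O_RDWR | O_NOCTTY);
+ *	struct termios tio;
+ *	if (fd >= 0 && tcgetattr(fd, &tio) == 0) {
+ *		cfmakeraw(&tio);
+ *		tcsetattr(fd, TCSANOW, &tio);
+ *	}
+ *
+ * The "/dev/ttyGS0" name is only an example; the actual node is whatever
+ * the gserial layer assigns to @port_num.
+ */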
+
+struct f_obex {
+	struct gserial			port;
+	u8				ctrl_id;
+	u8				data_id;
+	u8				port_num;
+	u8				can_activate;
+};
+
+static inline struct f_obex *func_to_obex(struct usb_function *f)
+{
+	return container_of(f, struct f_obex, port.func);
+}
+
+static inline struct f_obex *port_to_obex(struct gserial *p)
+{
+	return container_of(p, struct f_obex, port);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define OBEX_CTRL_IDX	0
+#define OBEX_DATA_IDX	1
+
+static struct usb_string obex_string_defs[] = {
+	[OBEX_CTRL_IDX].s	= "CDC Object Exchange (OBEX)",
+	[OBEX_DATA_IDX].s	= "CDC OBEX Data",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings obex_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= obex_string_defs,
+};
+
+static struct usb_gadget_strings *obex_strings[] = {
+	&obex_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_interface_descriptor obex_control_intf __initdata = {
+	.bLength		= sizeof(obex_control_intf),
+	.bDescriptorType	= USB_DT_INTERFACE,
+	.bInterfaceNumber	= 0,
+
+	.bAlternateSetting	= 0,
+	.bNumEndpoints		= 0,
+	.bInterfaceClass	= USB_CLASS_COMM,
+	.bInterfaceSubClass	= USB_CDC_SUBCLASS_OBEX,
+};
+
+static struct usb_interface_descriptor obex_data_nop_intf __initdata = {
+	.bLength		= sizeof(obex_data_nop_intf),
+	.bDescriptorType	= USB_DT_INTERFACE,
+	.bInterfaceNumber	= 1,
+
+	.bAlternateSetting	= 0,
+	.bNumEndpoints		= 0,
+	.bInterfaceClass	= USB_CLASS_CDC_DATA,
+};
+
+static struct usb_interface_descriptor obex_data_intf __initdata = {
+	.bLength		= sizeof(obex_data_intf),
+	.bDescriptorType	= USB_DT_INTERFACE,
+	.bInterfaceNumber	= 2,
+
+	.bAlternateSetting	= 1,
+	.bNumEndpoints		= 2,
+	.bInterfaceClass	= USB_CLASS_CDC_DATA,
+};
+
+static struct usb_cdc_header_desc obex_cdc_header_desc __initdata = {
+	.bLength		= sizeof(obex_cdc_header_desc),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= USB_CDC_HEADER_TYPE,
+	.bcdCDC			= cpu_to_le16(0x0120),
+};
+
+static struct usb_cdc_union_desc obex_cdc_union_desc __initdata = {
+	.bLength		= sizeof(obex_cdc_union_desc),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= USB_CDC_UNION_TYPE,
+	.bMasterInterface0	= 1,
+	.bSlaveInterface0	= 2,
+};
+
+static struct usb_cdc_obex_desc obex_desc __initdata = {
+	.bLength		= sizeof(obex_desc),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= USB_CDC_OBEX_TYPE,
+	.bcdVersion		= cpu_to_le16(0x0100),
+};
+
+/* High-Speed Support */
+
+static struct usb_endpoint_descriptor obex_hs_ep_out_desc __initdata = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+
+	.bEndpointAddress	= USB_DIR_OUT,
+	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize		= cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor obex_hs_ep_in_desc __initdata = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+
+	.bEndpointAddress	= USB_DIR_IN,
+	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize		= cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *hs_function[] __initdata = {
+	(struct usb_descriptor_header *) &obex_control_intf,
+	(struct usb_descriptor_header *) &obex_cdc_header_desc,
+	(struct usb_descriptor_header *) &obex_desc,
+	(struct usb_descriptor_header *) &obex_cdc_union_desc,
+
+	(struct usb_descriptor_header *) &obex_data_nop_intf,
+	(struct usb_descriptor_header *) &obex_data_intf,
+	(struct usb_descriptor_header *) &obex_hs_ep_in_desc,
+	(struct usb_descriptor_header *) &obex_hs_ep_out_desc,
+	NULL,
+};
+
+/* Full-Speed Support */
+
+static struct usb_endpoint_descriptor obex_fs_ep_in_desc __initdata = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+
+	.bEndpointAddress	= USB_DIR_IN,
+	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor obex_fs_ep_out_desc __initdata = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+
+	.bEndpointAddress	= USB_DIR_OUT,
+	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_function[] __initdata = {
+	(struct usb_descriptor_header *) &obex_control_intf,
+	(struct usb_descriptor_header *) &obex_cdc_header_desc,
+	(struct usb_descriptor_header *) &obex_desc,
+	(struct usb_descriptor_header *) &obex_cdc_union_desc,
+
+	(struct usb_descriptor_header *) &obex_data_nop_intf,
+	(struct usb_descriptor_header *) &obex_data_intf,
+	(struct usb_descriptor_header *) &obex_fs_ep_in_desc,
+	(struct usb_descriptor_header *) &obex_fs_ep_out_desc,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int obex_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_obex		*obex = func_to_obex(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	if (intf == obex->ctrl_id) {
+		if (alt != 0)
+			goto fail;
+		/* NOP */
+		DBG(cdev, "reset obex ttyGS%d control\n", obex->port_num);
+
+	} else if (intf == obex->data_id) {
+		if (alt > 1)
+			goto fail;
+
+		if (obex->port.in->driver_data) {
+			DBG(cdev, "reset obex ttyGS%d\n", obex->port_num);
+			gserial_disconnect(&obex->port);
+		}
+
+		if (!obex->port.in->desc || !obex->port.out->desc) {
+			DBG(cdev, "init obex ttyGS%d\n", obex->port_num);
+			if (config_ep_by_speed(cdev->gadget, f,
+					       obex->port.in) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       obex->port.out)) {
+				obex->port.out->desc = NULL;
+				obex->port.in->desc = NULL;
+				goto fail;
+			}
+		}
+
+		if (alt == 1) {
+			DBG(cdev, "activate obex ttyGS%d\n", obex->port_num);
+			gserial_connect(&obex->port, &obex->port_num);
+		}
+
+	} else
+		goto fail;
+
+	return 0;
+
+fail:
+	return -EINVAL;
+}
+
+static int obex_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct f_obex		*obex = func_to_obex(f);
+
+	if (intf == obex->ctrl_id)
+		return 0;
+
+	return obex->port.in->driver_data ? 1 : 0;
+}
+
+static void obex_disable(struct usb_function *f)
+{
+	struct f_obex	*obex = func_to_obex(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	DBG(cdev, "obex ttyGS%d disable\n", obex->port_num);
+	gserial_disconnect(&obex->port);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void obex_connect(struct gserial *g)
+{
+	struct f_obex		*obex = port_to_obex(g);
+	struct usb_composite_dev *cdev = g->func.config->cdev;
+	int			status;
+
+	if (!obex->can_activate)
+		return;
+
+	status = usb_function_activate(&g->func);
+	if (status)
+		DBG(cdev, "obex ttyGS%d function activate --> %d\n",
+			obex->port_num, status);
+}
+
+static void obex_disconnect(struct gserial *g)
+{
+	struct f_obex		*obex = port_to_obex(g);
+	struct usb_composite_dev *cdev = g->func.config->cdev;
+	int			status;
+
+	if (!obex->can_activate)
+		return;
+
+	status = usb_function_deactivate(&g->func);
+	if (status)
+		DBG(cdev, "obex ttyGS%d function deactivate --> %d\n",
+			obex->port_num, status);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int __init
+obex_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_obex		*obex = func_to_obex(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	obex->ctrl_id = status;
+
+	obex_control_intf.bInterfaceNumber = status;
+	obex_cdc_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	obex->data_id = status;
+
+	obex_data_nop_intf.bInterfaceNumber = status;
+	obex_data_intf.bInterfaceNumber = status;
+	obex_cdc_union_desc.bSlaveInterface0 = status;
+
+	/* allocate instance-specific endpoints */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &obex_fs_ep_in_desc);
+	if (!ep)
+		goto fail;
+	obex->port.in = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &obex_fs_ep_out_desc);
+	if (!ep)
+		goto fail;
+	obex->port.out = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(fs_function);
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+
+		obex_hs_ep_in_desc.bEndpointAddress =
+				obex_fs_ep_in_desc.bEndpointAddress;
+		obex_hs_ep_out_desc.bEndpointAddress =
+				obex_fs_ep_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(hs_function);
+	}
+
+	/* Avoid letting this gadget enumerate until the userspace
+	 * OBEX server is active.
+	 */
+	status = usb_function_deactivate(f);
+	if (status < 0)
+		WARNING(cdev, "obex ttyGS%d: can't prevent enumeration, %d\n",
+			obex->port_num, status);
+	else
+		obex->can_activate = true;
+
+	DBG(cdev, "obex ttyGS%d: %s speed IN/%s OUT/%s\n",
+			obex->port_num,
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			obex->port.in->name, obex->port.out->name);
+
+	return 0;
+
+fail:
+	/* we might as well release our claims on endpoints */
+	if (obex->port.out)
+		obex->port.out->driver_data = NULL;
+	if (obex->port.in)
+		obex->port.in->driver_data = NULL;
+
+	ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
+
+	return status;
+}
+
+static void
+obex_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+	kfree(func_to_obex(f));
+}
+
+/* Some controllers can't support CDC OBEX ... */
+static inline bool can_support_obex(struct usb_configuration *c)
+{
+	/* Since the first interface is a NOP, we can ignore the
+	 * issue of multi-interface support on most controllers.
+	 *
+	 * Altsettings are mandatory, however...
+	 */
+	if (!gadget_supports_altsettings(c->cdev->gadget))
+		return false;
+
+	/* everything else is *probably* fine ... */
+	return true;
+}
+
+/**
+ * obex_bind_config - add a CDC OBEX function to a configuration
+ * @c: the configuration to support the CDC OBEX instance
+ * @port_num: /dev/ttyGS* port this interface will use
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gserial_setup() with enough ports to
+ * handle all the ones it binds.  Caller is also responsible
+ * for calling @gserial_cleanup() before module unload.
+ */
+int __init obex_bind_config(struct usb_configuration *c, u8 port_num)
+{
+	struct f_obex	*obex;
+	int		status;
+
+	if (!can_support_obex(c))
+		return -EINVAL;
+
+	/* maybe allocate device-global string IDs, and patch descriptors */
+	if (obex_string_defs[OBEX_CTRL_IDX].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		obex_string_defs[OBEX_CTRL_IDX].id = status;
+
+		obex_control_intf.iInterface = status;
+
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		obex_string_defs[OBEX_DATA_IDX].id = status;
+
+		obex_data_nop_intf.iInterface =
+			obex_data_intf.iInterface = status;
+	}
+
+	/* allocate and initialize one new instance */
+	obex = kzalloc(sizeof *obex, GFP_KERNEL);
+	if (!obex)
+		return -ENOMEM;
+
+	obex->port_num = port_num;
+
+	obex->port.connect = obex_connect;
+	obex->port.disconnect = obex_disconnect;
+
+	obex->port.func.name = "obex";
+	obex->port.func.strings = obex_strings;
+	/* descriptors are per-instance copies */
+	obex->port.func.bind = obex_bind;
+	obex->port.func.unbind = obex_unbind;
+	obex->port.func.set_alt = obex_set_alt;
+	obex->port.func.get_alt = obex_get_alt;
+	obex->port.func.disable = obex_disable;
+
+	status = usb_add_function(c, &obex->port.func);
+	if (status)
+		kfree(obex);
+
+	return status;
+}
+
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_phonet.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_phonet.c
new file mode 100644
index 0000000..16512f9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_phonet.c
@@ -0,0 +1,633 @@
+/*
+ * f_phonet.c -- USB CDC Phonet function
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ *
+ * Author: Rémi Denis-Courmont
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_phonet.h>
+#include <linux/if_arp.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/composite.h>
+
+#include "u_phonet.h"
+
+#define PN_MEDIA_USB	0x1B
+#define MAXPACKET	512
+#if (PAGE_SIZE % MAXPACKET)
+#error MAXPACKET must divide PAGE_SIZE!
+#endif
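+/*
+ * Rationale: RX buffers are whole pages (see pn_rx_submit()), and
+ * pn_rx_complete() detects the end of a Phonet message by a short USB
+ * packet (req->actual < req->length).  That test is only reliable if a
+ * full page is an exact multiple of the bulk maxpacket size.
+ */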
+
+/*-------------------------------------------------------------------------*/
+
+struct phonet_port {
+	struct f_phonet			*usb;
+	spinlock_t			lock;
+};
+
+struct f_phonet {
+	struct usb_function		function;
+	struct {
+		struct sk_buff		*skb;
+		spinlock_t		lock;
+	} rx;
+	struct net_device		*dev;
+	struct usb_ep			*in_ep, *out_ep;
+
+	struct usb_request		*in_req;
+	struct usb_request		*out_reqv[0];
+};
+
+static int phonet_rxq_size = 17;
+
+static inline struct f_phonet *func_to_pn(struct usb_function *f)
+{
+	return container_of(f, struct f_phonet, function);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define USB_CDC_SUBCLASS_PHONET	0xfe
+#define USB_CDC_PHONET_TYPE	0xab
+
+static struct usb_interface_descriptor
+pn_control_intf_desc = {
+	.bLength =		sizeof pn_control_intf_desc,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber =	DYNAMIC, */
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_PHONET,
+};
+
+static const struct usb_cdc_header_desc
+pn_header_desc = {
+	.bLength =		sizeof pn_header_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static const struct usb_cdc_header_desc
+pn_phonet_desc = {
+	.bLength =		sizeof pn_phonet_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_PHONET_TYPE,
+	.bcdCDC =		cpu_to_le16(0x1505), /* ??? */
+};
+
+static struct usb_cdc_union_desc
+pn_union_desc = {
+	.bLength =		sizeof pn_union_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+
+	/* .bMasterInterface0 =	DYNAMIC, */
+	/* .bSlaveInterface0 =	DYNAMIC, */
+};
+
+static struct usb_interface_descriptor
+pn_data_nop_intf_desc = {
+	.bLength =		sizeof pn_data_nop_intf_desc,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber =	DYNAMIC, */
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+};
+
+static struct usb_interface_descriptor
+pn_data_intf_desc = {
+	.bLength =		sizeof pn_data_intf_desc,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber =	DYNAMIC, */
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+};
+
+static struct usb_endpoint_descriptor
+pn_fs_sink_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor
+pn_hs_sink_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(MAXPACKET),
+};
+
+static struct usb_endpoint_descriptor
+pn_fs_source_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor
+pn_hs_source_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *fs_pn_function[] = {
+	(struct usb_descriptor_header *) &pn_control_intf_desc,
+	(struct usb_descriptor_header *) &pn_header_desc,
+	(struct usb_descriptor_header *) &pn_phonet_desc,
+	(struct usb_descriptor_header *) &pn_union_desc,
+	(struct usb_descriptor_header *) &pn_data_nop_intf_desc,
+	(struct usb_descriptor_header *) &pn_data_intf_desc,
+	(struct usb_descriptor_header *) &pn_fs_sink_desc,
+	(struct usb_descriptor_header *) &pn_fs_source_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_pn_function[] = {
+	(struct usb_descriptor_header *) &pn_control_intf_desc,
+	(struct usb_descriptor_header *) &pn_header_desc,
+	(struct usb_descriptor_header *) &pn_phonet_desc,
+	(struct usb_descriptor_header *) &pn_union_desc,
+	(struct usb_descriptor_header *) &pn_data_nop_intf_desc,
+	(struct usb_descriptor_header *) &pn_data_intf_desc,
+	(struct usb_descriptor_header *) &pn_hs_sink_desc,
+	(struct usb_descriptor_header *) &pn_hs_source_desc,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int pn_net_open(struct net_device *dev)
+{
+	netif_wake_queue(dev);
+	return 0;
+}
+
+static int pn_net_close(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+
+static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_phonet *fp = ep->driver_data;
+	struct net_device *dev = fp->dev;
+	struct sk_buff *skb = req->context;
+
+	switch (req->status) {
+	case 0:
+		dev->stats.tx_packets++;
+		dev->stats.tx_bytes += skb->len;
+		break;
+
+	case -ESHUTDOWN: /* disconnected */
+	case -ECONNRESET: /* disabled */
+		dev->stats.tx_aborted_errors++;
+	default:
+		dev->stats.tx_errors++;
+	}
+
+	dev_kfree_skb_any(skb);
+	netif_wake_queue(dev);
+}
+
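+/*
+ * There is a single TX request (fp->in_req), so at most one packet is in
+ * flight: pn_net_xmit() stops the queue after queuing the request and
+ * pn_tx_complete() wakes it again (dev->tx_queue_len is 1 as well).
+ */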
+static int pn_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct phonet_port *port = netdev_priv(dev);
+	struct f_phonet *fp;
+	struct usb_request *req;
+	unsigned long flags;
+
+	if (skb->protocol != htons(ETH_P_PHONET))
+		goto out;
+
+	spin_lock_irqsave(&port->lock, flags);
+	fp = port->usb;
+	if (unlikely(!fp)) /* race with carrier loss */
+		goto out_unlock;
+
+	req = fp->in_req;
+	req->buf = skb->data;
+	req->length = skb->len;
+	req->complete = pn_tx_complete;
+	req->zero = 1;
+	req->context = skb;
+
+	if (unlikely(usb_ep_queue(fp->in_ep, req, GFP_ATOMIC)))
+		goto out_unlock;
+
+	netif_stop_queue(dev);
+	skb = NULL;
+
+out_unlock:
+	spin_unlock_irqrestore(&port->lock, flags);
+out:
+	if (unlikely(skb)) {
+		dev_kfree_skb(skb);
+		dev->stats.tx_dropped++;
+	}
+	return NETDEV_TX_OK;
+}
+
+static int pn_net_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < PHONET_MIN_MTU) || (new_mtu > PHONET_MAX_MTU))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static const struct net_device_ops pn_netdev_ops = {
+	.ndo_open	= pn_net_open,
+	.ndo_stop	= pn_net_close,
+	.ndo_start_xmit	= pn_net_xmit,
+	.ndo_change_mtu	= pn_net_mtu,
+};
+
+static void pn_net_setup(struct net_device *dev)
+{
+	dev->features		= 0;
+	dev->type		= ARPHRD_PHONET;
+	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
+	dev->mtu		= PHONET_DEV_MTU;
+	dev->hard_header_len	= 1;
+	dev->dev_addr[0]	= PN_MEDIA_USB;
+	dev->addr_len		= 1;
+	dev->tx_queue_len	= 1;
+
+	dev->netdev_ops		= &pn_netdev_ops;
+	dev->destructor		= free_netdev;
+	dev->header_ops		= &phonet_header_ops;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Queue buffer for data from the host
+ */
+static int
+pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
+{
+	struct page *page;
+	int err;
+
+	page = alloc_page(gfp_flags);
+	if (!page)
+		return -ENOMEM;
+
+	req->buf = page_address(page);
+	req->length = PAGE_SIZE;
+	req->context = page;
+
+	err = usb_ep_queue(fp->out_ep, req, gfp_flags);
+	if (unlikely(err))
+		put_page(page);
+	return err;
+}
+
+static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_phonet *fp = ep->driver_data;
+	struct net_device *dev = fp->dev;
+	struct page *page = req->context;
+	struct sk_buff *skb;
+	unsigned long flags;
+	int status = req->status;
+
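+	/*
+	 * Received Phonet messages arrive split across page-sized OUT
+	 * requests; a short completion (req->actual < req->length) marks
+	 * the last fragment.  Fragments are attached to fp->rx.skb as
+	 * page fragments until the message is complete, then passed up
+	 * via netif_rx().
+	 */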
+	switch (status) {
+	case 0:
+		spin_lock_irqsave(&fp->rx.lock, flags);
+		skb = fp->rx.skb;
+		if (!skb)
+			skb = fp->rx.skb = netdev_alloc_skb(dev, 12);
+		if (req->actual < req->length) /* Last fragment */
+			fp->rx.skb = NULL;
+		spin_unlock_irqrestore(&fp->rx.lock, flags);
+
+		if (unlikely(!skb))
+			break;
+
+		if (skb->len == 0) { /* First fragment */
+			skb->protocol = htons(ETH_P_PHONET);
+			skb_reset_mac_header(skb);
+			/* Can't use pskb_pull() on page in IRQ */
+			memcpy(skb_put(skb, 1), page_address(page), 1);
+		}
+
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+				skb->len <= 1, req->actual, PAGE_SIZE);
+		page = NULL;
+
+		if (req->actual < req->length) { /* Last fragment */
+			skb->dev = dev;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += skb->len;
+
+			netif_rx(skb);
+		}
+		break;
+
+	/* Do not resubmit in these cases: */
+	case -ESHUTDOWN: /* disconnect */
+	case -ECONNABORTED: /* hw reset */
+	case -ECONNRESET: /* dequeued (unlink or netif down) */
+		req = NULL;
+		break;
+
+	/* Do resubmit in these cases: */
+	case -EOVERFLOW: /* request buffer overflow */
+		dev->stats.rx_over_errors++;
+	default:
+		dev->stats.rx_errors++;
+		break;
+	}
+
+	if (page)
+		put_page(page);
+	if (req)
+		pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void __pn_reset(struct usb_function *f)
+{
+	struct f_phonet *fp = func_to_pn(f);
+	struct net_device *dev = fp->dev;
+	struct phonet_port *port = netdev_priv(dev);
+
+	netif_carrier_off(dev);
+	port->usb = NULL;
+
+	usb_ep_disable(fp->out_ep);
+	usb_ep_disable(fp->in_ep);
+	if (fp->rx.skb) {
+		dev_kfree_skb_irq(fp->rx.skb);
+		fp->rx.skb = NULL;
+	}
+}
+
+static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_phonet *fp = func_to_pn(f);
+	struct usb_gadget *gadget = fp->function.config->cdev->gadget;
+
+	if (intf == pn_control_intf_desc.bInterfaceNumber)
+		/* control interface, no altsetting */
+		return (alt > 0) ? -EINVAL : 0;
+
+	if (intf == pn_data_intf_desc.bInterfaceNumber) {
+		struct net_device *dev = fp->dev;
+		struct phonet_port *port = netdev_priv(dev);
+
+		/* data intf (0: inactive, 1: active) */
+		if (alt > 1)
+			return -EINVAL;
+
+		spin_lock(&port->lock);
+		__pn_reset(f);
+		if (alt == 1) {
+			int i;
+
+			if (config_ep_by_speed(gadget, f, fp->in_ep) ||
+			    config_ep_by_speed(gadget, f, fp->out_ep)) {
+				fp->in_ep->desc = NULL;
+				fp->out_ep->desc = NULL;
+				spin_unlock(&port->lock);
+				return -EINVAL;
+			}
+			usb_ep_enable(fp->out_ep);
+			usb_ep_enable(fp->in_ep);
+
+			port->usb = fp;
+			fp->out_ep->driver_data = fp;
+			fp->in_ep->driver_data = fp;
+
+			netif_carrier_on(dev);
+			for (i = 0; i < phonet_rxq_size; i++)
+				pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD);
+		}
+		spin_unlock(&port->lock);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int pn_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct f_phonet *fp = func_to_pn(f);
+
+	if (intf == pn_control_intf_desc.bInterfaceNumber)
+		return 0;
+
+	if (intf == pn_data_intf_desc.bInterfaceNumber) {
+		struct phonet_port *port = netdev_priv(fp->dev);
+		u8 alt;
+
+		spin_lock(&port->lock);
+		alt = port->usb != NULL;
+		spin_unlock(&port->lock);
+		return alt;
+	}
+
+	return -EINVAL;
+}
+
+static void pn_disconnect(struct usb_function *f)
+{
+	struct f_phonet *fp = func_to_pn(f);
+	struct phonet_port *port = netdev_priv(fp->dev);
+	unsigned long flags;
+
+	/* remain disabled until set_alt */
+	spin_lock_irqsave(&port->lock, flags);
+	__pn_reset(f);
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static __init
+int pn_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct usb_gadget *gadget = cdev->gadget;
+	struct f_phonet *fp = func_to_pn(f);
+	struct usb_ep *ep;
+	int status, i;
+
+	/* Reserve interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto err;
+	pn_control_intf_desc.bInterfaceNumber = status;
+	pn_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto err;
+	pn_data_nop_intf_desc.bInterfaceNumber = status;
+	pn_data_intf_desc.bInterfaceNumber = status;
+	pn_union_desc.bSlaveInterface0 = status;
+
+	/* Reserve endpoints */
+	status = -ENODEV;
+	ep = usb_ep_autoconfig(gadget, &pn_fs_sink_desc);
+	if (!ep)
+		goto err;
+	fp->out_ep = ep;
+	ep->driver_data = fp; /* Claim */
+
+	ep = usb_ep_autoconfig(gadget, &pn_fs_source_desc);
+	if (!ep)
+		goto err;
+	fp->in_ep = ep;
+	ep->driver_data = fp; /* Claim */
+
+	pn_hs_sink_desc.bEndpointAddress =
+		pn_fs_sink_desc.bEndpointAddress;
+	pn_hs_source_desc.bEndpointAddress =
+		pn_fs_source_desc.bEndpointAddress;
+
+	/* Do not try to bind Phonet twice... */
+	fp->function.descriptors = fs_pn_function;
+	fp->function.hs_descriptors = hs_pn_function;
+
+	/* Incoming USB requests */
+	status = -ENOMEM;
+	for (i = 0; i < phonet_rxq_size; i++) {
+		struct usb_request *req;
+
+		req = usb_ep_alloc_request(fp->out_ep, GFP_KERNEL);
+		if (!req)
+			goto err_req;
+
+		req->complete = pn_rx_complete;
+		fp->out_reqv[i] = req;
+	}
+
+	/* Outgoing USB requests */
+	fp->in_req = usb_ep_alloc_request(fp->in_ep, GFP_KERNEL);
+	if (!fp->in_req)
+		goto err_req;
+
+	INFO(cdev, "USB CDC Phonet function\n");
+	INFO(cdev, "using %s, OUT %s, IN %s\n", cdev->gadget->name,
+		fp->out_ep->name, fp->in_ep->name);
+	return 0;
+
+err_req:
+	for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++)
+		usb_ep_free_request(fp->out_ep, fp->out_reqv[i]);
+err:
+
+	if (fp->out_ep)
+		fp->out_ep->driver_data = NULL;
+	if (fp->in_ep)
+		fp->in_ep->driver_data = NULL;
+	ERROR(cdev, "USB CDC Phonet: cannot autoconfigure\n");
+	return status;
+}
+
+static void
+pn_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_phonet *fp = func_to_pn(f);
+	int i;
+
+	/* We are already disconnected */
+	if (fp->in_req)
+		usb_ep_free_request(fp->in_ep, fp->in_req);
+	for (i = 0; i < phonet_rxq_size; i++)
+		if (fp->out_reqv[i])
+			usb_ep_free_request(fp->out_ep, fp->out_reqv[i]);
+
+	kfree(fp);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct net_device *dev;
+
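+/*
+ * Add the CDC Phonet function to a configuration.  gphonet_setup() must
+ * have been called first, since it registers the single "upnlink%d" net
+ * device that this function binds to; gphonet_cleanup() undoes it.
+ */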
+int __init phonet_bind_config(struct usb_configuration *c)
+{
+	struct f_phonet *fp;
+	int err, size;
+
+	size = sizeof(*fp) + (phonet_rxq_size * sizeof(struct usb_request *));
+	fp = kzalloc(size, GFP_KERNEL);
+	if (!fp)
+		return -ENOMEM;
+
+	fp->dev = dev;
+	fp->function.name = "phonet";
+	fp->function.bind = pn_bind;
+	fp->function.unbind = pn_unbind;
+	fp->function.set_alt = pn_set_alt;
+	fp->function.get_alt = pn_get_alt;
+	fp->function.disable = pn_disconnect;
+	spin_lock_init(&fp->rx.lock);
+
+	err = usb_add_function(c, &fp->function);
+	if (err)
+		kfree(fp);
+	return err;
+}
+
+int __init gphonet_setup(struct usb_gadget *gadget)
+{
+	struct phonet_port *port;
+	int err;
+
+	/* Create net device */
+	BUG_ON(dev);
+	dev = alloc_netdev(sizeof(*port), "upnlink%d", pn_net_setup);
+	if (!dev)
+		return -ENOMEM;
+
+	port = netdev_priv(dev);
+	spin_lock_init(&port->lock);
+	netif_carrier_off(dev);
+	SET_NETDEV_DEV(dev, &gadget->dev);
+
+	err = register_netdev(dev);
+	if (err)
+		free_netdev(dev);
+	return err;
+}
+
+void gphonet_cleanup(void)
+{
+	unregister_netdev(dev);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_rndis.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_rndis.c
new file mode 100755
index 0000000..d58b4c6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_rndis.c
@@ -0,0 +1,1062 @@
+/*
+ * f_rndis.c -- RNDIS link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ *                    Author: Michal Nazarewicz (mina86@mina86.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include <linux/atomic.h>
+
+#include "u_ether.h"
+#include "rndis.h"
+#include "multi_packet.h"
+#include <mach/highspeed_debug.h>
+/*
+ * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
+ * been promoted instead of the standard CDC Ethernet.  The published RNDIS
+ * spec is ambiguous, incomplete, and needlessly complex.  Variants such as
+ * ActiveSync have even worse status in terms of specification.
+ *
+ * In short:  it's a protocol controlled by (and for) Microsoft, not for an
+ * Open ecosystem or markets.  Linux supports it *only* because Microsoft
+ * doesn't support the CDC Ethernet standard.
+ *
+ * The RNDIS data transfer model is complex, with multiple Ethernet packets
+ * per USB message, and out of band data.  The control model is built around
+ * what's essentially an "RNDIS RPC" protocol.  It's all wrapped in a CDC ACM
+ * (modem, not Ethernet) veneer, with those ACM descriptors being entirely
+ * useless (they're ignored).  RNDIS expects to be the only function in its
+ * configuration, so it's no real help if you need composite devices; and
+ * it expects to be the first configuration too.
+ *
+ * There is a single technical advantage of RNDIS over CDC Ethernet, if you
+ * discount the fluff that its RPC can be made to deliver: it doesn't need
+ * a NOP altsetting for the data interface.  That lets it work on some of the
+ * "so smart it's stupid" hardware which takes over configuration changes
+ * from the software, and adds restrictions like "no altsettings".
+ *
+ * Unfortunately MSFT's RNDIS drivers are buggy.  They hang or oops, and
+ * have all sorts of contrary-to-specification oddities that can prevent
+ * them from working sanely.  Since bugfixes (or accurate specs, letting
+ * Linux work around those bugs) are unlikely to ever come from MSFT, you
+ * may want to avoid using RNDIS on purely operational grounds.
+ *
+ * Omissions from the RNDIS 1.0 specification include:
+ *
+ *   - Power management ... references data that's scattered around lots
+ *     of other documentation, which is incorrect/incomplete there too.
+ *
+ *   - There are various undocumented protocol requirements, like the need
+ *     to send garbage in some control-OUT messages.
+ *
+ *   - MS-Windows drivers sometimes emit undocumented requests.
+ */
+
+struct f_rndis {
+	struct gether			port;
+	u8				ctrl_id, data_id;
+	u8				ethaddr[ETH_ALEN];
+	u32				vendorID;
+	const char			*manufacturer;
+	int				config;
+
+	struct usb_ep			*notify;
+	struct usb_request		*notify_req;
+	atomic_t			notify_count;
+	int			state;	/* 0: unbound, 1: bound, 2: open, 3: closed */
+};
+
+struct f_rndis		*g_rndis = NULL;
+
+static inline struct f_rndis *func_to_rndis(struct usb_function *f)
+{
+	return container_of(f, struct f_rndis, port.func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static unsigned int bitrate(struct usb_gadget *g)
+{
+#ifndef USB_ETHER_REPROT_MAX_MODEM_SPEED
+	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+		return 13 * 1024 * 8 * 1000 * 8;
+	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;
+	else
+		return 19 * 64 * 1 * 1000 * 8;
+#else
+	return LTE_CAT4_SPEED;
+#endif
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ */
+
+#define LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */
+#define STATUS_BYTECOUNT		8	/* 8 bytes data */
+
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor rndis_control_intf = {
+	.bLength =		sizeof rndis_control_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	/* status endpoint is optional; this could be patched later */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =   USB_CDC_SUBCLASS_ACM,
+	.bInterfaceProtocol =   USB_CDC_ACM_PROTO_VENDOR,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc header_desc = {
+	.bLength =		sizeof header_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0200),
+};
+
+static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor = {
+	.bLength =		sizeof call_mgmt_descriptor,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
+
+	.bmCapabilities =	0x00,
+	.bDataInterface =	0x01,
+};
+
+static struct usb_cdc_acm_descriptor rndis_acm_descriptor = {
+	.bLength =		sizeof rndis_acm_descriptor,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
+
+	.bmCapabilities =	0x00,
+};
+
+static struct usb_cdc_union_desc rndis_union_desc = {
+	.bLength =		sizeof(rndis_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+/* the data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor rndis_data_intf = {
+	.bLength =		sizeof rndis_data_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+
+static struct usb_interface_assoc_descriptor
+rndis_iad_descriptor = {
+	.bLength =		sizeof rndis_iad_descriptor,
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+
+	.bFirstInterface =	0, /* XXX, hardcoded */
+	.bInterfaceCount =	2,	/* control + data */
+	.bFunctionClass =	USB_CLASS_WIRELESS_CONTROLLER,	/* instead of USB_CLASS_COMM */
+	.bFunctionSubClass =	0x01,	/* instead of USB_CDC_SUBCLASS_ETHERNET */
+	.bFunctionProtocol =	0x03,	/* instead of USB_CDC_PROTO_NONE */
+	/* .iFunction = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(STATUS_BYTECOUNT),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *eth_fs_function[] = {
+	(struct usb_descriptor_header *) &rndis_iad_descriptor,
+
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_control_intf,
+	(struct usb_descriptor_header *) &header_desc,
+	(struct usb_descriptor_header *) &call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_union_desc,
+	(struct usb_descriptor_header *) &fs_notify_desc,
+
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_data_intf,
+	(struct usb_descriptor_header *) &fs_in_desc,
+	(struct usb_descriptor_header *) &fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(STATUS_BYTECOUNT),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_endpoint_descriptor hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *eth_hs_function[] = {
+	(struct usb_descriptor_header *) &rndis_iad_descriptor,
+
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_control_intf,
+	(struct usb_descriptor_header *) &header_desc,
+	(struct usb_descriptor_header *) &call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_union_desc,
+	(struct usb_descriptor_header *) &hs_notify_desc,
+
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_data_intf,
+	(struct usb_descriptor_header *) &hs_in_desc,
+	(struct usb_descriptor_header *) &hs_out_desc,
+	NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(STATUS_BYTECOUNT),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ss_intr_comp_desc = {
+	.bLength =		sizeof ss_intr_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
+	.bLength =		sizeof ss_bulk_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *eth_ss_function[] = {
+	(struct usb_descriptor_header *) &rndis_iad_descriptor,
+
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_control_intf,
+	(struct usb_descriptor_header *) &header_desc,
+	(struct usb_descriptor_header *) &call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_union_desc,
+	(struct usb_descriptor_header *) &ss_notify_desc,
+	(struct usb_descriptor_header *) &ss_intr_comp_desc,
+
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_data_intf,
+	(struct usb_descriptor_header *) &ss_in_desc,
+	(struct usb_descriptor_header *) &ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &ss_out_desc,
+	(struct usb_descriptor_header *) &ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string rndis_string_defs[] = {
+	[0].s = "RNDIS Communications Control",
+	[1].s = "RNDIS Ethernet Data",
+	[2].s = "RNDIS",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rndis_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rndis_string_defs,
+};
+
+static struct usb_gadget_strings *rndis_strings[] = {
+	&rndis_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static struct sk_buff *rndis_add_header(struct gether *port,
+					struct sk_buff *skb)
+{
+	struct sk_buff *skb2;
+
+	skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
+	if (skb2)
+		rndis_add_hdr(skb2);
+
+	dev_kfree_skb_any(skb);
+	return skb2;
+}
+
+static void rndis_response_available(void *_rndis)
+{
+	struct f_rndis			*rndis = _rndis;
+	struct usb_request		*req = rndis->notify_req;
+	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
+	__le32				*data = req->buf;
+	int				status;
+
+	if (atomic_inc_return(&rndis->notify_count) != 1)
+		return;
+
+	/* Send RNDIS RESPONSE_AVAILABLE notification; a
+	 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too
+	 *
+	 * This is the only notification defined by RNDIS.
+	 */
+	data[0] = cpu_to_le32(1);
+	data[1] = cpu_to_le32(0);
+
+	status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+	if (status) {
+		atomic_dec(&rndis->notify_count);
+		DBG(cdev, "notify/0 --> %d\n", status);
+	}
+}
+
+char *g_sent_buf = NULL;
+static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rndis			*rndis = req->context;
+	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
+	int				status = req->status;
+
+	/* after TX:
+	 *  - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
+	 *  - RNDIS_RESPONSE_AVAILABLE (status/irq)
+	 */
+	g_sent_buf = (char *)req->buf;
+	USBSTACK_DBG("%s ret: %d, req:0x%p, buf:0x%p", __func__, status, req, g_sent_buf);
+//    USBSTACK_DBG("%s, %u", __func__, __LINE__);
+//    usb_dbg_ep0reg();
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&rndis->notify_count, 0);
+		break;
+	default:
+		printk("RNDIS %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		/* FALLTHROUGH */
+	case 0:
+		if (ep != rndis->notify)
+			break;
+
+		/* handle multiple pending RNDIS_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&rndis->notify_count))
+			break;
+		status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+		if (status) {
+			atomic_dec(&rndis->notify_count);
+			DBG(cdev, "notify/1 --> %d\n", status);
+		}
+		break;
+	}
+}
+
+static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rndis			*rndis = req->context;
+	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
+	int				status;
+
+	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
+//	spin_lock(&dev->lock);
+//    USBREG_DBG("%s, %u", __func__, __LINE__);
+//    usb_dbg_ep0reg();
+
+	if (req->status == -ECONNRESET) {
+		USBSTACK_DBG("rndis_command_complete: request cancelled\n");
+		return;
+	}
+
+	status = rndis_msg_parser(rndis->config, (u8 *) req->buf);
+	USBSTACK_DBG("%s %u, ret: %d, len: %d", __func__, __LINE__, status, req->actual);
+
+#if 0 //def CONFIG_ARCH_ZX297520V3_MDL
+	if(status == req->actual)
+		BUG_ON(1);
+#endif
+
+	if (status < 0) {
+		ERROR(cdev, "RNDIS command error %d, %d/%d\n",
+			status, req->actual, req->length);
+		USBSTACK_DBG("RNDIS command error %d, actual:%d, length:%d\n",
+			status, req->actual, req->length);
+	}
+//	spin_unlock(&dev->lock);
+}
+
+static int
+rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_rndis		*rndis = func_to_rndis(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	USBSTACK_DBG("%s bRequestType: 0x%x, bRequest: 0x%x", __func__, ctrl->bRequestType, ctrl->bRequest);
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* RNDIS uses the CDC command encapsulation mechanism to implement
+	 * an RPC scheme, with much getting/setting of attributes by OID.
+	 */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_length > cdev->bufsiz) {
+			USBSTACK_DBG("rndis command length %u exceeds ep0 buffer\n",
+				     w_length);
+			goto invalid;
+		}
+
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		/* read the request; process it later */
+		value = w_length;
+		req->complete = rndis_command_complete;
+		req->context = rndis;
+		/* later, rndis_response_available() sends a notification */
+		break;
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		else {
+			u8 *buf;
+			u32 n;
+			u32 MsgType;
+			__le32 *tmp;
+
+			/* return the result */
+			buf = rndis_get_next_response(rndis->config, &n);
+			if (buf) {
+				memcpy(req->buf, buf, n);
+				req->complete = rndis_response_complete;
+				req->context = rndis;
+
+				tmp = (__le32 *)buf;
+				MsgType = get_unaligned_le32(tmp++);
+
+				rndis_free_response(rndis->config, buf);
+				value = n;
+			} else {
+				printk("rndis_setup: response buffer is NULL\n");
+			}
+			/* else stalls ... spec says to avoid that */
+		}
+		break;
+
+	default:
+invalid:
+		VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = (value < w_length);
+		req->length = value;
+		USBSTACK_DBG("rndis_setup: length:%d, value:%d\n", req->length, value);
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			usb_dbg_printf("rndis response on err %d\n", value);
+		/* ERROR(cdev, "rndis response on err %d\n", value); */
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+
+static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_rndis		*rndis = func_to_rndis(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret = 0;
+
+	USBSTACK_DBG("%s", __func__);
+
+	/* we know alt == 0 */
+
+	if (intf == rndis->ctrl_id) {
+		if (rndis->notify->driver_data) {
+			VDBG(cdev, "reset rndis control %d\n", intf);
+			ret = usb_ep_disable(rndis->notify);
+		}
+		if (!rndis->notify->desc) {
+			VDBG(cdev, "init rndis ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
+				goto fail;
+		}
+		ret = usb_ep_enable(rndis->notify);
+		rndis->notify->driver_data = rndis;
+
+	} else if (intf == rndis->data_id) {
+		struct net_device	*net;
+
+		if (rndis->port.in_ep->driver_data) {
+			DBG(cdev, "reset rndis\n");
+			gether_disconnect(&rndis->port);
+		}
+
+		if (!rndis->port.in_ep->desc || !rndis->port.out_ep->desc) {
+			DBG(cdev, "init rndis\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					       rndis->port.in_ep) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       rndis->port.out_ep)) {
+				rndis->port.in_ep->desc = NULL;
+				rndis->port.out_ep->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* Avoid ZLPs; they can be troublesome. */
+		rndis->port.is_zlp_ok = false;
+
+		/* RNDIS should be in the "RNDIS uninitialized" state,
+		 * either never activated or after rndis_uninit().
+		 *
+		 * We don't want data to flow here until a nonzero packet
+		 * filter is set, at which point it enters "RNDIS data
+		 * initialized" state ... but we do want the endpoints
+		 * to be activated.  It's a strange little state.
+		 *
+		 * REVISIT the RNDIS gadget code has done this wrong for a
+		 * very long time.  We need another call to the link layer
+		 * code -- gether_updown(...bool) maybe -- to do it right.
+		 */
+		rndis->port.cdc_filter = 0;
+
+		DBG(cdev, "RNDIS RX/TX early activation ... \n");
+		net = gether_connect(&rndis->port);
+		if (IS_ERR(net))
+			return PTR_ERR(net);
+
+		rndis_set_param_dev(rndis->config, net,
+				&rndis->port.cdc_filter);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static void rndis_disable(struct usb_function *f)
+{
+	struct f_rndis		*rndis = func_to_rndis(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	if (!rndis->notify->driver_data)
+		return;
+
+	DBG(cdev, "rndis deactivated\n");
+	USBSTACK_DBG("%s", __func__);
+
+	rndis_uninit(rndis->config);
+	gether_disconnect(&rndis->port);
+
+	usb_ep_disable(rndis->notify);
+	rndis->notify->driver_data = NULL;
+}
+#ifdef CONFIG_PM
+unsigned int g_rndis_suspend_cnt = 0;
+unsigned int g_rndis_resume_cnt = 0;
+static void rndis_suspend(struct usb_function *f)
+{
+	struct f_rndis		*rndis = func_to_rndis(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	if (!rndis->notify->driver_data)
+		return;
+
+	g_rndis_suspend_cnt++;
+	usb_printk("%s, %u, suspendcnt:%d\n", __func__, __LINE__, g_rndis_suspend_cnt);
+	USBSTACK_DBG("%s, %u suspendcnt:%d", __func__, __LINE__, g_rndis_suspend_cnt);
+	/* gether_disconnect(&rndis->port); */
+	rndis->port.suspend_state = 1;
+	multi_packet_deactivate();
+	usb_ep_disable((&rndis->port)->in_ep);
+	/* usb_ep_disable((&rndis->port)->out_ep); */
+	usb_ep_disable(rndis->notify);
+	gether_uevent_eth_rndis(&rndis->port);
+}
+
+static void rndis_resume(struct usb_function *f)
+{
+	struct f_rndis		*rndis = func_to_rndis(f);
+
+	if (!rndis->notify->driver_data)
+		return;
+
+	g_rndis_resume_cnt++;
+	usb_printk("%s, %u, resumecnt:%d\n", __func__, __LINE__, g_rndis_resume_cnt);
+	USBSTACK_DBG("%s, %u resumecnt:%d", __func__, __LINE__, g_rndis_resume_cnt);
+	usb_ep_resume_enable(rndis->notify);
+	usb_ep_resume_enable(rndis->port.in_ep);
+	rndis->port.suspend_state = 0;
+}
+#endif
+/*-------------------------------------------------------------------------*/
+
+/*
+ * This isn't quite the same mechanism as CDC Ethernet, since the
+ * notification scheme passes less data, but the same set of link
+ * states must be tested.  A key difference is that altsettings are
+ * not used to tell whether the link should send packets or not.
+ */
+
+static void rndis_open(struct gether *geth)
+{
+	struct f_rndis		*rndis = func_to_rndis(&geth->func);
+	struct usb_composite_dev *cdev = geth->func.config->cdev;
+
+	USBSTACK_DBG("%s", __func__);
+
+	rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3,
+				bitrate(cdev->gadget) / 100);
+	rndis_signal_connect(rndis->config);
+}
+
+static void rndis_close(struct gether *geth)
+{
+	struct f_rndis		*rndis = func_to_rndis(&geth->func);
+#if MULTIPACKET_BUF_ALLOC
+	struct usb_composite_dev *cdev = geth->func.config->cdev;
+#endif
+
+	DBG(geth->func.config->cdev, "%s\n", __func__);
+	USBSTACK_DBG("%s", __func__);
+
+	rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0);
+	rndis_signal_disconnect(rndis->config);
+
+#if 0	/* MULTIPACKET_BUF_ALLOC */
+	multi_packet_deactivate();
+	if (rndis->state == 2)
+		usb_ep_disable(rndis->port.in_ep);
+	rndis->state = 3;
+	multi_packet_buf_free(&rndis->port, cdev->gadget);
+#endif
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int
+rndis_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_rndis		*rndis = func_to_rndis(f);
+	int			status;
+	struct usb_ep		*ep;
+	USBSTACK_DBG("%s", __func__);
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->ctrl_id = status;
+	rndis_iad_descriptor.bFirstInterface = status;
+
+	rndis_control_intf.bInterfaceNumber = status;
+	rndis_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->data_id = status;
+
+	rndis_data_intf.bInterfaceNumber = status;
+	rndis_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc);
+	if (!ep)
+		goto fail;
+	rndis->port.in_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_out_desc);
+	if (!ep)
+		goto fail;
+	rndis->port.out_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* NOTE:  a status/notification endpoint is, strictly speaking,
+	 * optional.  We don't treat it that way though!  It's simpler,
+	 * and some newer profiles don't treat it as optional.
+	 */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_notify_desc);
+	if (!ep)
+		goto fail;
+	rndis->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!rndis->notify_req)
+		goto fail;
+	rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!rndis->notify_req->buf)
+		goto fail;
+	rndis->notify_req->length = STATUS_BYTECOUNT;
+	rndis->notify_req->context = rndis;
+	rndis->notify_req->complete = rndis_response_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(eth_fs_function);
+	if (!f->descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hs_in_desc.bEndpointAddress =
+				fs_in_desc.bEndpointAddress;
+		hs_out_desc.bEndpointAddress =
+				fs_out_desc.bEndpointAddress;
+		hs_notify_desc.bEndpointAddress =
+				fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(eth_hs_function);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		ss_in_desc.bEndpointAddress =
+				fs_in_desc.bEndpointAddress;
+		ss_out_desc.bEndpointAddress =
+				fs_out_desc.bEndpointAddress;
+		ss_notify_desc.bEndpointAddress =
+				fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(eth_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	rndis->port.open = rndis_open;
+	rndis->port.close = rndis_close;
+
+	status = rndis_register(rndis_response_available, rndis);
+	if (status < 0)
+		goto fail;
+	rndis->config = status;
+
+	rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0);
+	rndis_set_host_mac(rndis->config, rndis->ethaddr);
+
+	if (rndis->manufacturer && rndis->vendorID &&
+			rndis_set_param_vendor(rndis->config, rndis->vendorID,
+					       rndis->manufacturer))
+		goto fail;
+
+	multi_packet_handle_init(&rndis->port, cdev->gadget);
+
+	/* NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			rndis->port.in_ep->name, rndis->port.out_ep->name,
+			rndis->notify->name);
+
+#if MULTIPACKET_BUF_ALLOC
+	rndis->state = 1;
+#endif
+	return 0;
+
+fail:
+	if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->descriptors)
+		usb_free_descriptors(f->descriptors);
+
+	if (rndis->notify_req) {
+		kfree(rndis->notify_req->buf);
+		usb_ep_free_request(rndis->notify, rndis->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints */
+	if (rndis->notify)
+		rndis->notify->driver_data = NULL;
+	if (rndis->port.out_ep)
+		rndis->port.out_ep->driver_data = NULL;
+	if (rndis->port.in_ep)
+		rndis->port.in_ep->driver_data = NULL;
+
+	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+static void
+rndis_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rndis		*rndis = func_to_rndis(f);
+	USBSTACK_DBG("%s", __func__);
+
+	multi_packet_handle_exit();
+
+	rndis_deregister(rndis->config);
+	rndis_exit();
+
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+
+	kfree(rndis->notify_req->buf);
+	usb_ep_free_request(rndis->notify, rndis->notify_req);
+
+#if MULTIPACKET_BUF_ALLOC
+	rndis->state = 0;
+#endif
+	g_rndis = NULL;
+	kfree(rndis);
+}
+
+/* Some controllers can't support RNDIS ... */
+static inline bool can_support_rndis(struct usb_configuration *c)
+{
+	/* everything else is *presumably* fine */
+	return true;
+}
+
+/**
+ * rndis_bind_config - add RNDIS network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ *	side of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_setup().  Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int
+rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+{
+	USBSTACK_DBG("%s", __func__);
+
+	return rndis_bind_config_vendor(c, ethaddr, 0, NULL);
+}
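+
+/*
+ * Example (editor's illustrative sketch, not part of the original
+ * driver): a composite gadget typically calls the binding above from
+ * its configuration ->bind() callback, after gether_setup() has filled
+ * a host-address buffer.  "hostaddr" and "my_rndis_do_config" are
+ * hypothetical names used only for illustration:
+ *
+ *	static u8 hostaddr[ETH_ALEN];
+ *
+ *	// earlier, in the gadget's bind(): gether_setup(cdev->gadget, hostaddr);
+ *
+ *	static int my_rndis_do_config(struct usb_configuration *c)
+ *	{
+ *		return rndis_bind_config(c, hostaddr);
+ *	}
+ */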
+
+int
+rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+				u32 vendorID, const char *manufacturer)
+{
+	struct f_rndis	*rndis;
+	int		status;
+
+	if (!can_support_rndis(c) || !ethaddr) {
+		USBSTACK_DBG("%s %u error ethaddr:%p", __func__, __LINE__, ethaddr);
+		return -EINVAL;
+	}
+
+	USBSTACK_DBG("%s", __func__);
+
+	/* setup RNDIS itself */
+	status = rndis_init();
+	if (status < 0)
+		return status;
+
+	/* maybe allocate device-global string IDs */
+	if (rndis_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_string_defs[0].id = status;
+		rndis_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_string_defs[1].id = status;
+		rndis_data_intf.iInterface = status;
+
+		/* IAD iFunction label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_string_defs[2].id = status;
+		rndis_iad_descriptor.iFunction = status;
+	}
+
+	/* allocate and initialize one new instance */
+	status = -ENOMEM;
+	rndis = kzalloc(sizeof *rndis, GFP_KERNEL);
+	if (!rndis)
+		goto fail;
+
+	memcpy(rndis->ethaddr, ethaddr, ETH_ALEN);
+	rndis->vendorID = vendorID;
+	rndis->manufacturer = manufacturer;
+
+	/* RNDIS activates when the host changes this filter */
+	rndis->port.cdc_filter = 0;
+
+	/* RNDIS has special (and complex) framing */
+	rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
+	rndis->port.wrap = rndis_add_header;
+	rndis->port.unwrap = rndis_rm_hdr;
+
+	rndis->port.func.name = "rndis";
+	rndis->port.func.strings = rndis_strings;
+	/* descriptors are per-instance copies */
+	rndis->port.func.bind = rndis_bind;
+	rndis->port.func.unbind = rndis_unbind;
+	rndis->port.func.set_alt = rndis_set_alt;
+	rndis->port.func.setup = rndis_setup;
+	rndis->port.func.disable = rndis_disable;
+#if 0 //#ifdef CONFIG_PM
+	rndis->port.func.suspend = rndis_suspend;
+	rndis->port.func.resume = rndis_resume;
+#endif
+	status = usb_add_function(c, &rndis->port.func);
+	if (status) {
+		kfree(rndis);
+fail:
+		rndis_exit();
+	} else {
+		g_rndis = rndis;
+	}
+	return status;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_serial.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_serial.c
new file mode 100644
index 0000000..bdabc1f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_serial.c
@@ -0,0 +1,385 @@
+/*
+ * f_serial.c - generic USB serial function driver
+ *
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 by David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+#include "u_serial.h"
+#include "gadget_chips.h"
+
+
+/*
+ * This function packages a simple "generic serial" port with no real
+ * control mechanisms, just raw data transfer over two bulk endpoints.
+ *
+ * Because it's not standardized, this isn't as interoperable as the
+ * CDC ACM driver.  However, for many purposes it's just as functional
+ * if you can arrange appropriate host side drivers.
+ */
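+
+/*
+ * Example (editor's illustrative sketch, not part of the original
+ * driver): a gadget exposes one of these ports by calling
+ * gser_bind_config() from its configuration ->bind() callback, after
+ * gserial_setup() has reserved enough ports.  The names below are
+ * hypothetical:
+ *
+ *	// in the gadget driver's bind():
+ *	//	status = gserial_setup(cdev->gadget, 1);
+ *
+ *	static int my_serial_do_config(struct usb_configuration *c)
+ *	{
+ *		return gser_bind_config(c, 0);	// backs /dev/ttyGS0
+ *	}
+ */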
+struct f_gser {
+	struct gserial			port;
+	u8				data_id;
+	u8				port_num;
+};
+
+static inline struct f_gser *func_to_gser(struct usb_function *f)
+{
+	return container_of(f, struct f_gser, port.func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor gser_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_SUBCLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor gser_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor gser_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *gser_fs_function[] = {
+	(struct usb_descriptor_header *) &gser_interface_desc,
+	(struct usb_descriptor_header *) &gser_fs_in_desc,
+	(struct usb_descriptor_header *) &gser_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor gser_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor gser_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *gser_hs_function[] = {
+	(struct usb_descriptor_header *) &gser_interface_desc,
+	(struct usb_descriptor_header *) &gser_hs_in_desc,
+	(struct usb_descriptor_header *) &gser_hs_out_desc,
+	NULL,
+};
+
+static struct usb_endpoint_descriptor gser_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor gser_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor gser_ss_bulk_comp_desc = {
+	.bLength =              sizeof gser_ss_bulk_comp_desc,
+	.bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_descriptor_header *gser_ss_function[] = {
+	(struct usb_descriptor_header *) &gser_interface_desc,
+	(struct usb_descriptor_header *) &gser_ss_in_desc,
+	(struct usb_descriptor_header *) &gser_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &gser_ss_out_desc,
+	(struct usb_descriptor_header *) &gser_ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string gser_string_defs[] = {
+	[0].s = "Generic Serial",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings gser_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		gser_string_defs,
+};
+
+static struct usb_gadget_strings *gser_strings[] = {
+	&gser_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	int ret = 0;
+	struct f_gser		*gser = func_to_gser(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+#ifdef CONFIG_PM
+	if (gser->port.suspend_state == 1)
+		gser->port.suspend_state = 0;
+#endif
+	/* we know alt == 0, so this is an activation or a reset */
+
+	if (gser->port.in->driver_data) {
+		DBG(cdev, "reset generic ttyGS%d\n", gser->port_num);
+		gserial_disconnect(&gser->port);
+	}
+	if (!gser->port.in->desc || !gser->port.out->desc) {
+		DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
+		if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
+		    config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
+			gser->port.in->desc = NULL;
+			gser->port.out->desc = NULL;
+			return -EINVAL;
+		}
+	}
+	ret = gserial_connect(&gser->port, &gser->port_num);
+	printk(KERN_DEBUG "gser_set_alt, port_num:%d, ret:%d\n", gser->port_num, ret);
+	return ret;
+}
+
+static void gser_disable(struct usb_function *f)
+{
+	struct f_gser	*gser = func_to_gser(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+#if 0
+	if(gser->port.suspend_state == 1)
+		return;
+#endif
+	DBG(cdev, "generic ttyGS%d deactivated\n", gser->port_num);
+	gserial_disconnect(&gser->port);
+}
+
+#ifdef CONFIG_PM
+unsigned int g_gser_suspend_cnt = 0;
+unsigned int g_gser_resume_cnt = 0;
+static void gser_suspend(struct usb_function *f)
+{
+	struct f_gser	*gser = func_to_gser(f);
+
+	g_gser_suspend_cnt++;
+	usb_printk("%s, %u, suspendcnt:%d\n", __func__, __LINE__, g_gser_suspend_cnt);
+	USBSTACK_DBG("%s, %u suspendcnt:%d", __func__, __LINE__, g_gser_suspend_cnt);
+	gserial_disconnect_ext(&gser->port);
+	gser->port.suspend_state = 1;
+	usb_ep_disable(gser->port.in);
+}
+
+static void gser_resume(struct usb_function *f)
+{
+	struct f_gser	*gser = func_to_gser(f);
+
+	g_gser_resume_cnt++;
+	usb_printk("%s, %u, resumecnt:%d\n", __func__, __LINE__, g_gser_resume_cnt);
+	USBSTACK_DBG("%s, %u resumecnt:%d", __func__, __LINE__, g_gser_resume_cnt);
+	usb_ep_resume_enable(gser->port.in);
+	gserial_connect_ext(&gser->port);
+	gser->port.suspend_state = 0;
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* serial function driver setup/binding */
+
+static int
+gser_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_gser		*gser = func_to_gser(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	gser->data_id = status;
+	gser_interface_desc.bInterfaceNumber = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_in_desc);
+	if (!ep)
+		goto fail;
+	gser->port.in = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_out_desc);
+	if (!ep)
+		goto fail;
+	gser->port.out = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(gser_fs_function);
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		gser_hs_in_desc.bEndpointAddress =
+				gser_fs_in_desc.bEndpointAddress;
+		gser_hs_out_desc.bEndpointAddress =
+				gser_fs_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(gser_hs_function);
+	}
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		gser_ss_in_desc.bEndpointAddress =
+			gser_fs_in_desc.bEndpointAddress;
+		gser_ss_out_desc.bEndpointAddress =
+			gser_fs_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(gser_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	DBG(cdev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
+			gser->port_num,
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			gser->port.in->name, gser->port.out->name);
+	return 0;
+
+fail:
+	/* we might as well release our claims on endpoints */
+	if (gser->port.out)
+		gser->port.out->driver_data = NULL;
+	if (gser->port.in)
+		gser->port.in->driver_data = NULL;
+
+	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+static void
+gser_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	usb_free_descriptors(f->descriptors);
+	kfree(func_to_gser(f));
+}
+
+/**
+ * gser_bind_config - add a generic serial function to a configuration
+ * @c: the configuration to support the serial instance
+ * @port_num: /dev/ttyGS* port this interface will use
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gserial_setup() with enough ports to
+ * handle all the ones it binds.  Caller is also responsible
+ * for calling @gserial_cleanup() before module unload.
+ */
+int gser_bind_config(struct usb_configuration *c, u8 port_num)
+{
+	struct f_gser	*gser;
+	int		status;
+
+	/* REVISIT might want instance-specific strings to help
+	 * distinguish instances ...
+	 */
+	USB_DEBUG("PORT_NUM:%d", port_num);
+
+	/* maybe allocate device-global string ID */
+	if (gser_string_defs[0].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		gser_string_defs[0].id = status;
+	}
+
+	USB_DEBUG("MALLOC before");
+
+	/* allocate and initialize one new instance */
+	gser = kzalloc(sizeof *gser, GFP_KERNEL);
+	if (!gser)
+		return -ENOMEM;
+	USB_DEBUG("MALLOC end");
+
+	gser->port_num = port_num;
+
+	gser->port.func.name = "gser";
+	gser->port.func.strings = gser_strings;
+	gser->port.func.bind = gser_bind;
+	gser->port.func.unbind = gser_unbind;
+	gser->port.func.set_alt = gser_set_alt;
+	gser->port.func.disable = gser_disable;
+#if 0 //#ifdef CONFIG_PM
+    gser->port.func.suspend = gser_suspend;
+    gser->port.func.resume = gser_resume;
+#endif
+
+	status = usb_add_function(c, &gser->port.func);
+	if (status)
+		kfree(gser);
+	return status;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_sourcesink.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_sourcesink.c
new file mode 100644
index 0000000..7aa7ac8
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_sourcesink.c
@@ -0,0 +1,584 @@
+/*
+ * f_sourcesink.c - USB peripheral source/sink configuration driver
+ *
+ * Copyright (C) 2003-2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include "g_zero.h"
+#include "gadget_chips.h"
+
+
+/*
+ * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral
+ * controller drivers.
+ *
+ * This just sinks bulk packets OUT to the peripheral and sources them IN
+ * to the host, optionally with specific data patterns for integrity tests.
+ * As such it supports basic functionality and load tests.
+ *
+ * In terms of control messaging, this supports all the standard requests
+ * plus two that support control-OUT tests.  If the optional "autoresume"
+ * mode is enabled, it provides good functional coverage for the "USBCV"
+ * test harness from USB-IF.
+ *
+ * Note that because this doesn't queue more than one request at a time,
+ * some other function must be used to test queueing logic.  The network
+ * link (g_ether) is the best overall option for that, since its TX and RX
+ * queues are relatively independent, will receive a range of packet sizes,
+ * and can often be made to run out completely.  Those issues are important
+ * when stress testing peripheral controller drivers.
+ *
+ *
+ * This is currently packaged as a configuration driver, which can't be
+ * combined with other functions to make composite devices.  However, it
+ * can be combined with other independent configurations.
+ */
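+
+/*
+ * Example (editor's illustrative sketch, not part of the original
+ * driver): a zero-style gadget adds this configuration from its
+ * composite bind() via sourcesink_add(); "autoresume" here is a
+ * hypothetical module parameter:
+ *
+ *	static bool autoresume;
+ *
+ *	// in the composite gadget's bind(), after string IDs are set up:
+ *	//	status = sourcesink_add(cdev, autoresume);
+ *	//	if (status < 0)
+ *	//		return status;
+ */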
+struct f_sourcesink {
+	struct usb_function	function;
+
+	struct usb_ep		*in_ep;
+	struct usb_ep		*out_ep;
+};
+
+static inline struct f_sourcesink *func_to_ss(struct usb_function *f)
+{
+	return container_of(f, struct f_sourcesink, function);
+}
+
+static unsigned pattern;
+module_param(pattern, uint, 0);
+MODULE_PARM_DESC(pattern, "0 = all zeroes, 1 = mod63 ");
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_interface_descriptor source_sink_intf = {
+	.bLength =		sizeof source_sink_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor fs_source_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_sink_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_source_sink_descs[] = {
+	(struct usb_descriptor_header *) &source_sink_intf,
+	(struct usb_descriptor_header *) &fs_sink_desc,
+	(struct usb_descriptor_header *) &fs_source_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor hs_source_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_sink_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *hs_source_sink_descs[] = {
+	(struct usb_descriptor_header *) &source_sink_intf,
+	(struct usb_descriptor_header *) &hs_source_desc,
+	(struct usb_descriptor_header *) &hs_sink_desc,
+	NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_source_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+struct usb_ss_ep_comp_descriptor ss_source_comp_desc = {
+	.bLength =		USB_DT_SS_EP_COMP_SIZE,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst =		0,
+	.bmAttributes =		0,
+	.wBytesPerInterval =	0,
+};
+
+static struct usb_endpoint_descriptor ss_sink_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+struct usb_ss_ep_comp_descriptor ss_sink_comp_desc = {
+	.bLength =		USB_DT_SS_EP_COMP_SIZE,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst =		0,
+	.bmAttributes =		0,
+	.wBytesPerInterval =	0,
+};
+
+static struct usb_descriptor_header *ss_source_sink_descs[] = {
+	(struct usb_descriptor_header *) &source_sink_intf,
+	(struct usb_descriptor_header *) &ss_source_desc,
+	(struct usb_descriptor_header *) &ss_source_comp_desc,
+	(struct usb_descriptor_header *) &ss_sink_desc,
+	(struct usb_descriptor_header *) &ss_sink_comp_desc,
+	NULL,
+};
+
+/* function-specific strings: */
+
+static struct usb_string strings_sourcesink[] = {
+	[0].s = "source and sink data",
+	{  }			/* end of list */
+};
+
+static struct usb_gadget_strings stringtab_sourcesink = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_sourcesink,
+};
+
+static struct usb_gadget_strings *sourcesink_strings[] = {
+	&stringtab_sourcesink,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init
+sourcesink_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_sourcesink	*ss = func_to_ss(f);
+	int	id;
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	source_sink_intf.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc);
+	if (!ss->in_ep) {
+autoconf_fail:
+		ERROR(cdev, "%s: can't autoconfigure on %s\n",
+			f->name, cdev->gadget->name);
+		return -ENODEV;
+	}
+	ss->in_ep->driver_data = cdev;	/* claim */
+
+	ss->out_ep = usb_ep_autoconfig(cdev->gadget, &fs_sink_desc);
+	if (!ss->out_ep)
+		goto autoconf_fail;
+	ss->out_ep->driver_data = cdev;	/* claim */
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hs_source_desc.bEndpointAddress =
+				fs_source_desc.bEndpointAddress;
+		hs_sink_desc.bEndpointAddress =
+				fs_sink_desc.bEndpointAddress;
+		f->hs_descriptors = hs_source_sink_descs;
+	}
+
+	/* support super speed hardware */
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		ss_source_desc.bEndpointAddress =
+				fs_source_desc.bEndpointAddress;
+		ss_sink_desc.bEndpointAddress =
+				fs_sink_desc.bEndpointAddress;
+		f->ss_descriptors = ss_source_sink_descs;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+	    (gadget_is_superspeed(c->cdev->gadget) ? "super" :
+	     (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
+			f->name, ss->in_ep->name, ss->out_ep->name);
+	return 0;
+}
+
+static void
+sourcesink_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	kfree(func_to_ss(f));
+}
+
+/* optionally require specific source/sink data patterns  */
+static int check_read_data(struct f_sourcesink *ss, struct usb_request *req)
+{
+	unsigned		i;
+	u8			*buf = req->buf;
+	struct usb_composite_dev *cdev = ss->function.config->cdev;
+
+	for (i = 0; i < req->actual; i++, buf++) {
+		switch (pattern) {
+
+		/* all-zeroes has no synchronization issues */
+		case 0:
+			if (*buf == 0)
+				continue;
+			break;
+
+		/* "mod63" stays in sync with short-terminated transfers,
+		 * OR otherwise when host and gadget agree on how large
+		 * each usb transfer request should be.  Resync is done
+		 * with set_interface or set_config.  (We *WANT* it to
+		 * get quickly out of sync if controllers or their drivers
+		 * stutter for any reason, including buffer duplication...)
+		 */
+		case 1:
+			if (*buf == (u8)(i % 63))
+				continue;
+			break;
+		}
+		ERROR(cdev, "bad OUT byte, buf[%d] = %d\n", i, *buf);
+		usb_ep_set_halt(ss->out_ep);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void reinit_write_data(struct usb_ep *ep, struct usb_request *req)
+{
+	unsigned	i;
+	u8		*buf = req->buf;
+
+	switch (pattern) {
+	case 0:
+		memset(req->buf, 0, req->length);
+		break;
+	case 1:
+		for  (i = 0; i < req->length; i++)
+			*buf++ = (u8) (i % 63);
+		break;
+	}
+}
+
+static void source_sink_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_sourcesink	*ss = ep->driver_data;
+	struct usb_composite_dev *cdev = ss->function.config->cdev;
+	int			status = req->status;
+
+	switch (status) {
+
+	case 0:				/* normal completion? */
+		if (ep == ss->out_ep) {
+			check_read_data(ss, req);
+			memset(req->buf, 0x55, req->length);
+		} else
+			reinit_write_data(ep, req);
+		break;
+
+	/* this endpoint is normally active while we're configured */
+	case -ECONNABORTED:		/* hardware forced ep reset */
+	case -ECONNRESET:		/* request dequeued */
+	case -ESHUTDOWN:		/* disconnect from host */
+		VDBG(cdev, "%s gone (%d), %d/%d\n", ep->name, status,
+				req->actual, req->length);
+		if (ep == ss->out_ep)
+			check_read_data(ss, req);
+		free_ep_req(ep, req);
+		return;
+
+	case -EOVERFLOW:		/* buffer overrun on read means that
+					 * we didn't provide a big enough
+					 * buffer.
+					 */
+	default:
+		DBG(cdev, "%s complete --> %d, %d/%d\n", ep->name,
+				status, req->actual, req->length);
+		/* FALLTHROUGH */
+	case -EREMOTEIO:		/* short read */
+		break;
+	}
+
+	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (status) {
+		ERROR(cdev, "kill %s:  resubmit %d bytes --> %d\n",
+				ep->name, req->length, status);
+		usb_ep_set_halt(ep);
+		/* FIXME recover later ... somehow */
+	}
+}
+
+static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in)
+{
+	struct usb_ep		*ep;
+	struct usb_request	*req;
+	int			status;
+
+	ep = is_in ? ss->in_ep : ss->out_ep;
+	req = alloc_ep_req(ep);
+	if (!req)
+		return -ENOMEM;
+
+	req->complete = source_sink_complete;
+	if (is_in)
+		reinit_write_data(ep, req);
+	else
+		memset(req->buf, 0x55, req->length);
+
+	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (status) {
+		struct usb_composite_dev	*cdev;
+
+		cdev = ss->function.config->cdev;
+		ERROR(cdev, "start %s %s --> %d\n",
+				is_in ? "IN" : "OUT",
+				ep->name, status);
+		free_ep_req(ep, req);
+	}
+
+	return status;
+}
+
+static void disable_source_sink(struct f_sourcesink *ss)
+{
+	struct usb_composite_dev	*cdev;
+
+	cdev = ss->function.config->cdev;
+	disable_endpoints(cdev, ss->in_ep, ss->out_ep);
+	VDBG(cdev, "%s disabled\n", ss->function.name);
+}
+
+static int
+enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss)
+{
+	int					result = 0;
+	struct usb_ep				*ep;
+
+	/* one endpoint writes (sources) zeroes IN (to the host) */
+	ep = ss->in_ep;
+	result = config_ep_by_speed(cdev->gadget, &(ss->function), ep);
+	if (result)
+		return result;
+	result = usb_ep_enable(ep);
+	if (result < 0)
+		return result;
+	ep->driver_data = ss;
+
+	result = source_sink_start_ep(ss, true);
+	if (result < 0) {
+fail:
+		ep = ss->in_ep;
+		usb_ep_disable(ep);
+		ep->driver_data = NULL;
+		return result;
+	}
+
+	/* one endpoint reads (sinks) anything OUT (from the host) */
+	ep = ss->out_ep;
+	result = config_ep_by_speed(cdev->gadget, &(ss->function), ep);
+	if (result)
+		goto fail;
+	result = usb_ep_enable(ep);
+	if (result < 0)
+		goto fail;
+	ep->driver_data = ss;
+
+	result = source_sink_start_ep(ss, false);
+	if (result < 0) {
+		usb_ep_disable(ep);
+		ep->driver_data = NULL;
+		goto fail;
+	}
+
+	DBG(cdev, "%s enabled\n", ss->function.name);
+	return result;
+}
+
+static int sourcesink_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct f_sourcesink	*ss = func_to_ss(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	/* we know alt is zero */
+	if (ss->in_ep->driver_data)
+		disable_source_sink(ss);
+	return enable_source_sink(cdev, ss);
+}
+
+static void sourcesink_disable(struct usb_function *f)
+{
+	struct f_sourcesink	*ss = func_to_ss(f);
+
+	disable_source_sink(ss);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int __init sourcesink_bind_config(struct usb_configuration *c)
+{
+	struct f_sourcesink	*ss;
+	int			status;
+
+	ss = kzalloc(sizeof *ss, GFP_KERNEL);
+	if (!ss)
+		return -ENOMEM;
+
+	ss->function.name = "source/sink";
+	ss->function.descriptors = fs_source_sink_descs;
+	ss->function.bind = sourcesink_bind;
+	ss->function.unbind = sourcesink_unbind;
+	ss->function.set_alt = sourcesink_set_alt;
+	ss->function.disable = sourcesink_disable;
+
+	status = usb_add_function(c, &ss->function);
+	if (status)
+		kfree(ss);
+	return status;
+}
+
+static int sourcesink_setup(struct usb_configuration *c,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_request	*req = c->cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	req->length = USB_BUFSIZ;
+
+	/* composite driver infrastructure handles everything except
+	 * the two control test requests.
+	 */
+	switch (ctrl->bRequest) {
+
+	/*
+	 * These are the same vendor-specific requests supported by
+	 * Intel's USB 2.0 compliance test devices.  We exceed that
+	 * device spec by allowing multiple-packet requests.
+	 *
+	 * NOTE:  the Control-OUT data stays in req->buf ... better
+	 * would be copying it into a scratch buffer, so that other
+	 * requests may safely intervene.
+	 */
+	case 0x5b:	/* control WRITE test -- fill the buffer */
+		if (ctrl->bRequestType != (USB_DIR_OUT|USB_TYPE_VENDOR))
+			goto unknown;
+		if (w_value || w_index)
+			break;
+		/* just read that many bytes into the buffer */
+		if (w_length > req->length)
+			break;
+		value = w_length;
+		break;
+	case 0x5c:	/* control READ test -- return the buffer */
+		if (ctrl->bRequestType != (USB_DIR_IN|USB_TYPE_VENDOR))
+			goto unknown;
+		if (w_value || w_index)
+			break;
+		/* expect those bytes are still in the buffer; send back */
+		if (w_length > req->length)
+			break;
+		value = w_length;
+		break;
+
+	default:
+unknown:
+		VDBG(c->cdev,
+			"unknown control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		VDBG(c->cdev, "source/sink req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(c->cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(c->cdev, "source/sink response, err %d\n",
+					value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static struct usb_configuration sourcesink_driver = {
+	.label		= "source/sink",
+	.strings	= sourcesink_strings,
+	.setup		= sourcesink_setup,
+	.bConfigurationValue = 3,
+	.bmAttributes	= USB_CONFIG_ATT_SELFPOWER,
+	/* .iConfiguration = DYNAMIC */
+};
+
+/**
+ * sourcesink_add - add a source/sink testing configuration to a device
+ * @cdev: the device to support the configuration
+ */
+int __init sourcesink_add(struct usb_composite_dev *cdev, bool autoresume)
+{
+	int id;
+
+	/* allocate string ID(s) */
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_sourcesink[0].id = id;
+
+	source_sink_intf.iInterface = id;
+	sourcesink_driver.iConfiguration = id;
+
+	/* support autoresume for remote wakeup testing */
+	if (autoresume)
+		sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+
+	/* support OTG systems */
+	if (gadget_is_otg(cdev->gadget)) {
+		sourcesink_driver.descriptors = otg_desc;
+		sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	return usb_add_config(cdev, &sourcesink_driver, sourcesink_bind_config);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_subset.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_subset.c
new file mode 100644
index 0000000..e5bb966
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_subset.c
@@ -0,0 +1,460 @@
+/*
+ * f_subset.c -- "CDC Subset" Ethernet link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include "u_ether.h"
+
+
+/*
+ * This function packages a simple "CDC Subset" Ethernet port with no real
+ * control mechanisms; just raw data transfer over two bulk endpoints.
+ * The data transfer model is exactly that of CDC Ethernet, which is
+ * why we call it the "CDC Subset".
+ *
+ * Because it's not standardized, this has some interoperability issues.
+ * They mostly relate to driver binding, since the data transfer model is
+ * so simple (CDC Ethernet).  The original versions of this protocol used
+ * specific product/vendor IDs:  byteswapped IDs for Digital Equipment's
+ * SA-1100 "Itsy" board, which could run Linux 2.4 kernels and supported
+ * daughtercards with USB peripheral connectors.  (It was used more often
+ * with other boards, using the Itsy identifiers.)  Linux hosts recognized
+ * this with CONFIG_USB_ARMLINUX; these devices have only one configuration
+ * and one interface.
+ *
+ * At some point, MCCI defined a (nonconformant) CDC MDLM variant called
+ * "SAFE", which happens to have a mode which is identical to the "CDC
+ * Subset" in terms of data transfer and lack of control model.  This was
+ * adopted by later Sharp Zaurus models, and by some other software which
+ * Linux hosts recognize with CONFIG_USB_NET_ZAURUS.
+ *
+ * Because Microsoft's RNDIS drivers are far from robust, we added a few
+ * descriptors to the CDC Subset code, making this code look like a SAFE
+ * implementation.  This lets you use MCCI's host side MS-Windows drivers
+ * if you get fed up with RNDIS.  It also makes it easier for composite
+ * drivers to work, since they can use class based binding instead of
+ * caring about specific product and vendor IDs.
+ */
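+
+/*
+ * Example (editor's illustrative sketch, not part of the original
+ * driver): like the other u_ether based functions in this directory,
+ * the CDC Subset link is added from a configuration ->bind() callback;
+ * "hostaddr" is a hypothetical buffer already filled by gether_setup():
+ *
+ *	static u8 hostaddr[ETH_ALEN];
+ *
+ *	static int my_subset_do_config(struct usb_configuration *c)
+ *	{
+ *		return geth_bind_config(c, hostaddr);
+ *	}
+ */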
+
+struct f_gether {
+	struct gether			port;
+
+	char				ethaddr[14];
+};
+
+static inline struct f_gether *func_to_geth(struct usb_function *f)
+{
+	return container_of(f, struct f_gether, port.func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * "Simple" CDC-subset option is a simple vendor-neutral model that most
+ * full speed controllers can handle:  one interface, two bulk endpoints.
+ * To assist host side drivers, we fancy it up a bit, and add descriptors so
+ * some host side drivers will understand it as a "SAFE" variant.
+ *
+ * "SAFE" loosely follows CDC WMC MDLM, violating the spec in various ways.
+ * Data endpoints live in the control interface, there's no data interface.
+ * And it's not used to talk to a cell phone radio.
+ */
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor subset_data_intf = {
+	.bLength =		sizeof subset_data_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =      USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_MDLM,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc mdlm_header_desc = {
+	.bLength =		sizeof mdlm_header_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_mdlm_desc mdlm_desc = {
+	.bLength =		sizeof mdlm_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_MDLM_TYPE,
+
+	.bcdVersion =		cpu_to_le16(0x0100),
+	.bGUID = {
+		0x5d, 0x34, 0xcf, 0x66, 0x11, 0x18, 0x11, 0xd6,
+		0xa2, 0x1a, 0x00, 0x01, 0x02, 0xca, 0x9a, 0x7f,
+	},
+};
+
+/* since "usb_cdc_mdlm_detail_desc" is a variable length structure, we
+ * can't really use its struct.  All we do here is say that we're using
+ * the submode of "SAFE" which directly matches the CDC Subset.
+ */
+static u8 mdlm_detail_desc[] = {
+	6,
+	USB_DT_CS_INTERFACE,
+	USB_CDC_MDLM_DETAIL_TYPE,
+
+	0,	/* "SAFE" */
+	0,	/* network control capabilities (none) */
+	0,	/* network data capabilities ("raw" encapsulation) */
+};
+
+static struct usb_cdc_ether_desc ether_desc = {
+	.bLength =		sizeof ether_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,
+
+	/* this descriptor actually adds value, surprise! */
+	/* .iMACAddress = DYNAMIC */
+	.bmEthernetStatistics =	cpu_to_le32(0), /* no statistics */
+	.wMaxSegmentSize =	cpu_to_le16(ETH_FRAME_LEN),
+	.wNumberMCFilters =	cpu_to_le16(0),
+	.bNumberPowerFilters =	0,
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor fs_subset_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_subset_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_eth_function[] = {
+	(struct usb_descriptor_header *) &subset_data_intf,
+	(struct usb_descriptor_header *) &mdlm_header_desc,
+	(struct usb_descriptor_header *) &mdlm_desc,
+	(struct usb_descriptor_header *) &mdlm_detail_desc,
+	(struct usb_descriptor_header *) &ether_desc,
+	(struct usb_descriptor_header *) &fs_subset_in_desc,
+	(struct usb_descriptor_header *) &fs_subset_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor hs_subset_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_subset_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *hs_eth_function[] = {
+	(struct usb_descriptor_header *) &subset_data_intf,
+	(struct usb_descriptor_header *) &mdlm_header_desc,
+	(struct usb_descriptor_header *) &mdlm_desc,
+	(struct usb_descriptor_header *) &mdlm_detail_desc,
+	(struct usb_descriptor_header *) &ether_desc,
+	(struct usb_descriptor_header *) &hs_subset_in_desc,
+	(struct usb_descriptor_header *) &hs_subset_out_desc,
+	NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_subset_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor ss_subset_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_subset_bulk_comp_desc = {
+	.bLength =		sizeof ss_subset_bulk_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *ss_eth_function[] = {
+	(struct usb_descriptor_header *) &subset_data_intf,
+	(struct usb_descriptor_header *) &mdlm_header_desc,
+	(struct usb_descriptor_header *) &mdlm_desc,
+	(struct usb_descriptor_header *) &mdlm_detail_desc,
+	(struct usb_descriptor_header *) &ether_desc,
+	(struct usb_descriptor_header *) &ss_subset_in_desc,
+	(struct usb_descriptor_header *) &ss_subset_bulk_comp_desc,
+	(struct usb_descriptor_header *) &ss_subset_out_desc,
+	(struct usb_descriptor_header *) &ss_subset_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string geth_string_defs[] = {
+	[0].s = "CDC Ethernet Subset/SAFE",
+	[1].s = NULL /* DYNAMIC */,
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings geth_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		geth_string_defs,
+};
+
+static struct usb_gadget_strings *geth_strings[] = {
+	&geth_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int geth_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_gether		*geth = func_to_geth(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct net_device	*net;
+
+	/* we know alt == 0, so this is an activation or a reset */
+
+	if (geth->port.in_ep->driver_data) {
+		DBG(cdev, "reset cdc subset\n");
+		gether_disconnect(&geth->port);
+	}
+
+	DBG(cdev, "init + activate cdc subset\n");
+	if (config_ep_by_speed(cdev->gadget, f, geth->port.in_ep) ||
+	    config_ep_by_speed(cdev->gadget, f, geth->port.out_ep)) {
+		geth->port.in_ep->desc = NULL;
+		geth->port.out_ep->desc = NULL;
+		return -EINVAL;
+	}
+
+	net = gether_connect(&geth->port);
+	return IS_ERR(net) ? PTR_ERR(net) : 0;
+}
+
+static void geth_disable(struct usb_function *f)
+{
+	struct f_gether	*geth = func_to_geth(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	DBG(cdev, "net deactivated\n");
+	gether_disconnect(&geth->port);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* serial function driver setup/binding */
+
+static int
+geth_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_gether		*geth = func_to_geth(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	subset_data_intf.bInterfaceNumber = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_in_desc);
+	if (!ep)
+		goto fail;
+	geth->port.in_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_out_desc);
+	if (!ep)
+		goto fail;
+	geth->port.out_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(fs_eth_function);
+	if (!f->descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hs_subset_in_desc.bEndpointAddress =
+				fs_subset_in_desc.bEndpointAddress;
+		hs_subset_out_desc.bEndpointAddress =
+				fs_subset_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(hs_eth_function);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		ss_subset_in_desc.bEndpointAddress =
+				fs_subset_in_desc.bEndpointAddress;
+		ss_subset_out_desc.bEndpointAddress =
+				fs_subset_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(ss_eth_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	/* NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	DBG(cdev, "CDC Subset: %s speed IN/%s OUT/%s\n",
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			geth->port.in_ep->name, geth->port.out_ep->name);
+	return 0;
+
+fail:
+	if (f->descriptors)
+		usb_free_descriptors(f->descriptors);
+	if (f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+
+	/* we might as well release our claims on endpoints */
+	if (geth->port.out_ep)
+		geth->port.out_ep->driver_data = NULL;
+	if (geth->port.in_ep)
+		geth->port.in_ep->driver_data = NULL;
+
+	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+static void
+geth_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+	geth_string_defs[1].s = NULL;
+	kfree(func_to_geth(f));
+}
+
+/**
+ * geth_bind_config - add CDC Subset network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ *	side of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_setup().  Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+{
+	struct f_gether	*geth;
+	int		status;
+
+	if (!ethaddr)
+		return -EINVAL;
+
+	/* maybe allocate device-global string IDs */
+	if (geth_string_defs[0].id == 0) {
+
+		/* interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		geth_string_defs[0].id = status;
+		subset_data_intf.iInterface = status;
+
+		/* MAC address */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		geth_string_defs[1].id = status;
+		ether_desc.iMACAddress = status;
+	}
+
+	/* allocate and initialize one new instance */
+	geth = kzalloc(sizeof *geth, GFP_KERNEL);
+	if (!geth)
+		return -ENOMEM;
+
+	/* export host's Ethernet address in CDC format */
+	snprintf(geth->ethaddr, sizeof geth->ethaddr,
+		"%02X%02X%02X%02X%02X%02X",
+		ethaddr[0], ethaddr[1], ethaddr[2],
+		ethaddr[3], ethaddr[4], ethaddr[5]);
+	geth_string_defs[1].s = geth->ethaddr;
+
+	geth->port.cdc_filter = DEFAULT_FILTER;
+
+	geth->port.func.name = "cdc_subset";
+	geth->port.func.strings = geth_strings;
+	geth->port.func.bind = geth_bind;
+	geth->port.func.unbind = geth_unbind;
+	geth->port.func.set_alt = geth_set_alt;
+	geth->port.func.disable = geth_disable;
+
+	status = usb_add_function(c, &geth->port.func);
+	if (status) {
+		geth_string_defs[1].s = NULL;
+		kfree(geth);
+	}
+	return status;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_uac1.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_uac1.c
new file mode 100644
index 0000000..1a5dcd5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_uac1.c
@@ -0,0 +1,776 @@
+/*
+ * f_uac1.c -- USB Audio class function driver (formerly f_audio.c)
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+
+#include "u_uac1.h"
+
+#define OUT_EP_MAX_PACKET_SIZE	200
+static int req_buf_size = OUT_EP_MAX_PACKET_SIZE;
+module_param(req_buf_size, int, S_IRUGO);
+MODULE_PARM_DESC(req_buf_size, "ISO OUT endpoint request buffer size");
+
+static int req_count = 256;
+module_param(req_count, int, S_IRUGO);
+MODULE_PARM_DESC(req_count, "ISO OUT endpoint request count");
+
+static int audio_buf_size = 48000;
+module_param(audio_buf_size, int, S_IRUGO);
+MODULE_PARM_DESC(audio_buf_size, "Audio buffer size");
+
+static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value);
+static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
+
+/*
+ * DESCRIPTORS ... most are static, but strings and full
+ * configuration descriptors are built on demand.
+ */
+
+/*
+ * We have two interfaces: AudioControl and AudioStreaming
+ * TODO: only playback is supported currently
+ */
+#define F_AUDIO_AC_INTERFACE	0
+#define F_AUDIO_AS_INTERFACE	1
+#define F_AUDIO_NUM_INTERFACES	2
+
+/* B.3.1  Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc __initdata = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+
+#define UAC_DT_AC_HEADER_LENGTH	UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \
+	+ UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0))
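+/*
+ * Editor's note (illustrative, assuming the descriptor size macros in
+ * linux/usb/audio.h of this kernel: AC header 8 + bInCollection,
+ * input terminal 12, output terminal 9, feature unit 7 + (ch + 1) * 2):
+ * UAC_DT_TOTAL_LENGTH then evaluates to 10 + 12 + 9 + 9 = 40 bytes,
+ * which is the value advertised below in wTotalLength.
+ */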
+/* B.3.2  Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+	.bLength =		UAC_DT_AC_HEADER_LENGTH,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_HEADER,
+	.bcdADC =		__constant_cpu_to_le16(0x0100),
+	.wTotalLength =		__constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+	.bInCollection =	F_AUDIO_NUM_INTERFACES,
+	.baInterfaceNr = {
+		[0] =		F_AUDIO_AC_INTERFACE,
+		[1] =		F_AUDIO_AS_INTERFACE,
+	}
+};
+
+#define INPUT_TERMINAL_ID	1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+	.bLength =		UAC_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_INPUT_TERMINAL,
+	.bTerminalID =		INPUT_TERMINAL_ID,
+	.wTerminalType =	UAC_TERMINAL_STREAMING,
+	.bAssocTerminal =	0,
+	.wChannelConfig =	0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID		2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+	.bLength		= UAC_DT_FEATURE_UNIT_SIZE(0),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_FEATURE_UNIT,
+	.bUnitID		= FEATURE_UNIT_ID,
+	.bSourceID		= INPUT_TERMINAL_ID,
+	.bControlSize		= 2,
+	.bmaControls[0]		= (UAC_FU_MUTE | UAC_FU_VOLUME),
+};
+
+static struct usb_audio_control mute_control = {
+	.list = LIST_HEAD_INIT(mute_control.list),
+	.name = "Mute Control",
+	.type = UAC_FU_MUTE,
+	/* Todo: add real Mute control code */
+	.set = generic_set_cmd,
+	.get = generic_get_cmd,
+};
+
+static struct usb_audio_control volume_control = {
+	.list = LIST_HEAD_INIT(volume_control.list),
+	.name = "Volume Control",
+	.type = UAC_FU_VOLUME,
+	/* Todo: add real Volume control code */
+	.set = generic_set_cmd,
+	.get = generic_get_cmd,
+};
+
+static struct usb_audio_control_selector feature_unit = {
+	.list = LIST_HEAD_INIT(feature_unit.list),
+	.id = FEATURE_UNIT_ID,
+	.name = "Mute & Volume Control",
+	.type = UAC_FEATURE_UNIT,
+	.desc = (struct usb_descriptor_header *)&feature_unit_desc,
+};
+
+#define OUTPUT_TERMINAL_ID	3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+	.bLength		= UAC_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_OUTPUT_TERMINAL,
+	.bTerminalID		= OUTPUT_TERMINAL_ID,
+	.wTerminalType		= UAC_OUTPUT_TERMINAL_SPEAKER,
+	.bAssocTerminal		= FEATURE_UNIT_ID,
+	.bSourceID		= FEATURE_UNIT_ID,
+};
+
+/* B.4.1  Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2  Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+	.bLength =		UAC_DT_AS_HEADER_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_AS_GENERAL,
+	.bTerminalLink =	INPUT_TERMINAL_ID,
+	.bDelay =		1,
+	.wFormatTag =		UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+	.bLength =		UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_FORMAT_TYPE,
+	.bFormatType =		UAC_FORMAT_TYPE_I,
+	.bSubframeSize =	2,
+	.bBitResolution =	16,
+	.bSamFreqType =		1,
+};
+
+/* Standard ISO OUT Endpoint Descriptor */
+static struct usb_endpoint_descriptor as_out_ep_desc  = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_SYNC_ADAPTIVE
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize =	__constant_cpu_to_le16(OUT_EP_MAX_PACKET_SIZE),
+	.bInterval =		4,
+};
+
+/* Class-specific AS ISO OUT Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_out_desc __initdata = {
+	.bLength =		UAC_ISO_ENDPOINT_DESC_SIZE,
+	.bDescriptorType =	USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype =	UAC_EP_GENERAL,
+	.bmAttributes = 	1,
+	.bLockDelayUnits =	1,
+	.wLockDelay =		__constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *f_audio_desc[] __initdata = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header *)&output_terminal_desc,
+	(struct usb_descriptor_header *)&feature_unit_desc,
+
+	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_header_desc,
+
+	(struct usb_descriptor_header *)&as_type_i_desc,
+
+	(struct usb_descriptor_header *)&as_out_ep_desc,
+	(struct usb_descriptor_header *)&as_iso_out_desc,
+	NULL,
+};
+
+/*
+ * This function driver registers an ALSA sound card and follows the
+ * USB Audio Class 1.0 specification.
+ */
+
+/*-------------------------------------------------------------------------*/
+struct f_audio_buf {
+	u8 *buf;
+	int actual;
+	struct list_head list;
+};
+
+static struct f_audio_buf *f_audio_buffer_alloc(int buf_size)
+{
+	struct f_audio_buf *copy_buf;
+
+	copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
+	if (!copy_buf)
+		return ERR_PTR(-ENOMEM);
+
+	copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
+	if (!copy_buf->buf) {
+		kfree(copy_buf);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return copy_buf;
+}
+
+static void f_audio_buffer_free(struct f_audio_buf *audio_buf)
+{
+	kfree(audio_buf->buf);
+	kfree(audio_buf);
+}
+/*-------------------------------------------------------------------------*/
+
+struct f_audio {
+	struct gaudio			card;
+
+	/* endpoints handle full and/or high speeds */
+	struct usb_ep			*out_ep;
+
+	spinlock_t			lock;
+	struct f_audio_buf *copy_buf;
+	struct work_struct playback_work;
+	struct list_head play_queue;
+
+	/* Control Set command */
+	struct list_head cs;
+	u8 set_cmd;
+	struct usb_audio_control *set_con;
+};
+
+static inline struct f_audio *func_to_audio(struct usb_function *f)
+{
+	return container_of(f, struct f_audio, card.func);
+}
+
+/*-------------------------------------------------------------------------*/
+
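+/*
+ * Data flow: f_audio_out_ep_complete() below appends each ISO OUT packet to
+ * the current copy buffer; once the next packet would no longer fit within
+ * audio_buf_size, the filled buffer is moved to play_queue and this work
+ * item hands it to ALSA via u_audio_playback() before freeing it.
+ */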
+static void f_audio_playback_work(struct work_struct *data)
+{
+	struct f_audio *audio = container_of(data, struct f_audio,
+					playback_work);
+	struct f_audio_buf *play_buf;
+
+	spin_lock_irq(&audio->lock);
+	if (list_empty(&audio->play_queue)) {
+		spin_unlock_irq(&audio->lock);
+		return;
+	}
+	play_buf = list_first_entry(&audio->play_queue,
+			struct f_audio_buf, list);
+	list_del(&play_buf->list);
+	spin_unlock_irq(&audio->lock);
+
+	u_audio_playback(&audio->card, play_buf->buf, play_buf->actual);
+	f_audio_buffer_free(play_buf);
+}
+
+static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_audio *audio = req->context;
+	struct usb_composite_dev *cdev = audio->card.func.config->cdev;
+	struct f_audio_buf *copy_buf = audio->copy_buf;
+	int err;
+
+	if (!copy_buf)
+		return -EINVAL;
+
+	/* Copy buffer is full, add it to the play_queue */
+	if (audio_buf_size - copy_buf->actual < req->actual) {
+		list_add_tail(&copy_buf->list, &audio->play_queue);
+		schedule_work(&audio->playback_work);
+		copy_buf = f_audio_buffer_alloc(audio_buf_size);
+		if (IS_ERR(copy_buf))
+			return -ENOMEM;
+	}
+
+	memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual);
+	copy_buf->actual += req->actual;
+	audio->copy_buf = copy_buf;
+
+	err = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (err)
+		ERROR(cdev, "%s queue req: %d\n", ep->name, err);
+
+	return 0;
+
+}
+
+static void f_audio_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_audio *audio = req->context;
+	int status = req->status;
+	u32 data = 0;
+	struct usb_ep *out_ep = audio->out_ep;
+
+	switch (status) {
+
+	case 0:				/* normal completion? */
+		if (ep == out_ep)
+			f_audio_out_ep_complete(ep, req);
+		else if (audio->set_con) {
+			memcpy(&data, req->buf, req->length);
+			audio->set_con->set(audio->set_con, audio->set_cmd,
+					le16_to_cpu(data));
+			audio->set_con = NULL;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static int audio_set_intf_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct f_audio		*audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	u8			id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16			len = le16_to_cpu(ctrl->wLength);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u8			con_sel = (w_value >> 8) & 0xFF;
+	u8			cmd = (ctrl->bRequest & 0x0F);
+	struct usb_audio_control_selector *cs;
+	struct usb_audio_control *con;
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+			ctrl->bRequest, w_value, len, id);
+
+	list_for_each_entry(cs, &audio->cs, list) {
+		if (cs->id == id) {
+			list_for_each_entry(con, &cs->control, list) {
+				if (con->type == con_sel) {
+					audio->set_con = con;
+					break;
+				}
+			}
+			break;
+		}
+	}
+
+	audio->set_cmd = cmd;
+	req->context = audio;
+	req->complete = f_audio_complete;
+
+	return len;
+}
+
+static int audio_get_intf_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct f_audio		*audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u8			id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16			len = le16_to_cpu(ctrl->wLength);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u8			con_sel = (w_value >> 8) & 0xFF;
+	u8			cmd = (ctrl->bRequest & 0x0F);
+	struct usb_audio_control_selector *cs;
+	struct usb_audio_control *con;
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+			ctrl->bRequest, w_value, len, id);
+
+	list_for_each_entry(cs, &audio->cs, list) {
+		if (cs->id == id) {
+			list_for_each_entry(con, &cs->control, list) {
+				if (con->type == con_sel && con->get) {
+					value = con->get(con, cmd);
+					break;
+				}
+			}
+			break;
+		}
+	}
+
+	req->context = audio;
+	req->complete = f_audio_complete;
+	memcpy(req->buf, &value, len);
+
+	return len;
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int			value = -EOPNOTSUPP;
+	u16			ep = le16_to_cpu(ctrl->wIndex);
+	u16			len = le16_to_cpu(ctrl->wLength);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	switch (ctrl->bRequest) {
+	case UAC_SET_CUR:
+		value = len;
+		break;
+
+	case UAC_SET_MIN:
+		break;
+
+	case UAC_SET_MAX:
+		break;
+
+	case UAC_SET_RES:
+		break;
+
+	case UAC_SET_MEM:
+		break;
+
+	default:
+		break;
+	}
+
+	return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int value = -EOPNOTSUPP;
+	u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	switch (ctrl->bRequest) {
+	case UAC_GET_CUR:
+	case UAC_GET_MIN:
+	case UAC_GET_MAX:
+	case UAC_GET_RES:
+		value = len;
+		break;
+	case UAC_GET_MEM:
+		break;
+	default:
+		break;
+	}
+
+	return value;
+}
+
+static int
+f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything; interface
+	 * activation uses set_alt().
+	 */
+	switch (ctrl->bRequestType) {
+	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
+		value = audio_set_intf_req(f, ctrl);
+		break;
+
+	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
+		value = audio_get_intf_req(f, ctrl);
+		break;
+
+	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_set_endpoint_req(f, ctrl);
+		break;
+
+	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_get_endpoint_req(f, ctrl);
+		break;
+
+	default:
+		ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "audio req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "audio response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_audio		*audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_ep *out_ep = audio->out_ep;
+	struct usb_request *req;
+	int i = 0, err = 0;
+
+	DBG(cdev, "intf %d, alt %d\n", intf, alt);
+
+	if (intf == 1) {
+		if (alt == 1) {
+			usb_ep_enable(out_ep);
+			out_ep->driver_data = audio;
+			audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
+			if (IS_ERR(audio->copy_buf))
+				return -ENOMEM;
+
+			/*
+			 * allocate a bunch of read buffers
+			 * and queue them all at once.
+			 */
+			for (i = 0; i < req_count && err == 0; i++) {
+				req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
+				if (req) {
+					req->buf = kzalloc(req_buf_size,
+							GFP_ATOMIC);
+					if (req->buf) {
+						req->length = req_buf_size;
+						req->context = audio;
+						req->complete =
+							f_audio_complete;
+						err = usb_ep_queue(out_ep,
+							req, GFP_ATOMIC);
+						if (err)
+							ERROR(cdev,
+							"%s queue req: %d\n",
+							out_ep->name, err);
+					} else
+						err = -ENOMEM;
+				} else
+					err = -ENOMEM;
+			}
+
+		} else {
+			struct f_audio_buf *copy_buf = audio->copy_buf;
+			if (copy_buf) {
+				list_add_tail(&copy_buf->list,
+						&audio->play_queue);
+				schedule_work(&audio->playback_work);
+			}
+		}
+	}
+
+	return err;
+}
+
+static void f_audio_disable(struct usb_function *f)
+{
+	return;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void f_audio_build_desc(struct f_audio *audio)
+{
+	struct gaudio *card = &audio->card;
+	u8 *sam_freq;
+	int rate;
+
+	/* Set channel numbers */
+	input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card);
+	as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card);
+
+	/* Set sample rates */
+	rate = u_audio_get_playback_rate(card);
+	sam_freq = as_type_i_desc.tSamFreq[0];
+	memcpy(sam_freq, &rate, 3);
+
+	/* Todo: Set Sample bits and other parameters */
+
+	return;
+}
+
+/* audio function driver setup/binding */
+static int __init
+f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_audio		*audio = func_to_audio(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	f_audio_build_desc(audio);
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ac_interface_desc.bInterfaceNumber = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	as_interface_alt_0_desc.bInterfaceNumber = status;
+	as_interface_alt_1_desc.bInterfaceNumber = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc);
+	if (!ep)
+		goto fail;
+	audio->out_ep = ep;
+	audio->out_ep->desc = &as_out_ep_desc;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(f_audio_desc);
+
+	/*
+	 * support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		c->highspeed = true;
+		f->hs_descriptors = usb_copy_descriptors(f_audio_desc);
+	}
+
+	return 0;
+
+fail:
+
+	return status;
+}
+
+static void
+f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_audio		*audio = func_to_audio(f);
+
+	usb_free_descriptors(f->descriptors);
+	usb_free_descriptors(f->hs_descriptors);
+	kfree(audio);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
+{
+	con->data[cmd] = value;
+
+	return 0;
+}
+
+static int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
+{
+	return con->data[cmd];
+}
+
+/* Todo: add more control selectors dynamically */
+int __init control_selector_init(struct f_audio *audio)
+{
+	INIT_LIST_HEAD(&audio->cs);
+	list_add(&feature_unit.list, &audio->cs);
+
+	INIT_LIST_HEAD(&feature_unit.control);
+	list_add(&mute_control.list, &feature_unit.control);
+	list_add(&volume_control.list, &feature_unit.control);
+
+	volume_control.data[UAC__CUR] = 0xffc0;
+	volume_control.data[UAC__MIN] = 0xe3a0;
+	volume_control.data[UAC__MAX] = 0xfff0;
+	volume_control.data[UAC__RES] = 0x0030;
+
+	return 0;
+}
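+
+/*
+ * Note: per the UAC 1.0 spec the Volume control values set up above are
+ * signed 16-bit numbers in units of 1/256 dB, so the defaults correspond
+ * roughly to CUR -0.25 dB, MIN -28.4 dB, MAX -0.06 dB, RES 0.19 dB
+ * (interpretation given for reference only).
+ */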
+
+/**
+ * audio_bind_config - add USB audio function to a configuration
+ * @c: the configuration to support the USB audio function
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ */
+int __init audio_bind_config(struct usb_configuration *c)
+{
+	struct f_audio *audio;
+	int status;
+
+	/* allocate and initialize one new instance */
+	audio = kzalloc(sizeof *audio, GFP_KERNEL);
+	if (!audio)
+		return -ENOMEM;
+
+	audio->card.func.name = "g_audio";
+	audio->card.gadget = c->cdev->gadget;
+
+	INIT_LIST_HEAD(&audio->play_queue);
+	spin_lock_init(&audio->lock);
+
+	/* set up ALSA audio devices */
+	status = gaudio_setup(&audio->card);
+	if (status < 0)
+		goto setup_fail;
+
+	audio->card.func.strings = audio_strings;
+	audio->card.func.bind = f_audio_bind;
+	audio->card.func.unbind = f_audio_unbind;
+	audio->card.func.set_alt = f_audio_set_alt;
+	audio->card.func.setup = f_audio_setup;
+	audio->card.func.disable = f_audio_disable;
+
+	control_selector_init(audio);
+
+	INIT_WORK(&audio->playback_work, f_audio_playback_work);
+
+	status = usb_add_function(c, &audio->card.func);
+	if (status)
+		goto add_fail;
+
+	INFO(c->cdev, "audio_buf_size %d, req_buf_size %d, req_count %d\n",
+		audio_buf_size, req_buf_size, req_count);
+
+	return status;
+
+add_fail:
+	gaudio_cleanup();
+setup_fail:
+	kfree(audio);
+	return status;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_uac2.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_uac2.c
new file mode 100644
index 0000000..e7cc4de
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_uac2.c
@@ -0,0 +1,1449 @@
+/*
+ * f_uac2.c -- USB Audio Class 2.0 Function
+ *
+ * Copyright (C) 2011
+ *    Yadwinder Singh (yadi.brar01@gmail.com)
+ *    Jaswinder Singh (jaswinder.singh@linaro.org)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+/* Playback(USB-IN) Default Stereo - Fl/Fr */
+static int p_chmask = 0x3;
+module_param(p_chmask, uint, S_IRUGO);
+MODULE_PARM_DESC(p_chmask, "Playback Channel Mask");
+
+/* Playback Default 48 KHz */
+static int p_srate = 48000;
+module_param(p_srate, uint, S_IRUGO);
+MODULE_PARM_DESC(p_srate, "Playback Sampling Rate");
+
+/* Playback Default 16bits/sample */
+static int p_ssize = 2;
+module_param(p_ssize, uint, S_IRUGO);
+MODULE_PARM_DESC(p_ssize, "Playback Sample Size(bytes)");
+
+/* Capture(USB-OUT) Default Stereo - Fl/Fr */
+static int c_chmask = 0x3;
+module_param(c_chmask, uint, S_IRUGO);
+MODULE_PARM_DESC(c_chmask, "Capture Channel Mask");
+
+/* Capture Default 64 KHz */
+static int c_srate = 64000;
+module_param(c_srate, uint, S_IRUGO);
+MODULE_PARM_DESC(c_srate, "Capture Sampling Rate");
+
+/* Capture Default 16bits/sample */
+static int c_ssize = 2;
+module_param(c_ssize, uint, S_IRUGO);
+MODULE_PARM_DESC(c_ssize, "Capture Sample Size(bytes)");
+
+#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+#define ALT_SET(x, a)	do {(x) &= ~0xff; (x) |= (a); } while (0)
+#define ALT_GET(x)	((x) & 0xff)
+#define INTF_SET(x, i)	do {(x) &= 0xff; (x) |= ((i) << 8); } while (0)
+#define INTF_GET(x)	((x >> 8) & 0xff)
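+/*
+ * Each of the ac_alt/as_out_alt/as_in_alt words packs the interface number
+ * into bits [15:8] and the currently active alternate setting into bits
+ * [7:0].  For example, after INTF_SET(x, 2) and ALT_SET(x, 1), x == 0x0201,
+ * so INTF_GET(x) == 2 and ALT_GET(x) == 1.
+ */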
+
+/* Keep everyone on their toes */
+#define USB_XFERS	2
+
+/*
+ * The driver implements a simple UAC_2 topology.
+ * USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture
+ * ALSA_Playback -> IT_2 -> OT_4 -> USB-IN
+ * Capture and Playback sampling rates are independently
+ *  controlled by two clock sources:
+ *    CLK_5 := c_srate, and CLK_6 := p_srate
+ */
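+/*
+ * The terminal and clock IDs below follow that topology.  The rates
+ * advertised for CLK_5/CLK_6 come from the c_srate/p_srate module
+ * parameters; for example (module name depending on the gadget built around
+ * this function) "modprobe g_audio p_srate=44100 c_srate=44100" would make
+ * both directions run at 44.1 kHz.
+ */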
+#define USB_OUT_IT_ID	1
+#define IO_IN_IT_ID	2
+#define IO_OUT_OT_ID	3
+#define USB_IN_OT_ID	4
+#define USB_OUT_CLK_ID	5
+#define USB_IN_CLK_ID	6
+
+#define CONTROL_ABSENT	0
+#define CONTROL_RDONLY	1
+#define CONTROL_RDWR	3
+
+#define CLK_FREQ_CTRL	0
+#define CLK_VLD_CTRL	2
+
+#define COPY_CTRL	0
+#define CONN_CTRL	2
+#define OVRLD_CTRL	4
+#define CLSTR_CTRL	6
+#define UNFLW_CTRL	8
+#define OVFLW_CTRL	10
+
+const char *uac2_name = "snd_uac2";
+
+struct uac2_req {
+	struct uac2_rtd_params *pp; /* parent param */
+	struct usb_request *req;
+};
+
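+/*
+ * rbuf backs the USB side: it holds USB_XFERS requests worth of data
+ * (max_psize bytes each) and every queued usb_request's buf points into it.
+ * dma_area/dma_bytes/hw_ptr describe the ALSA ring buffer that the ISO
+ * completion handler copies to (capture) or from (playback).
+ */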
+struct uac2_rtd_params {
+	bool ep_enabled; /* if the ep is enabled */
+	/* Size of the ring buffer */
+	size_t dma_bytes;
+	unsigned char *dma_area;
+
+	struct snd_pcm_substream *ss;
+
+	/* Ring buffer */
+	ssize_t hw_ptr;
+
+	void *rbuf;
+
+	size_t period_size;
+
+	unsigned max_psize;
+	struct uac2_req ureq[USB_XFERS];
+
+	spinlock_t lock;
+};
+
+struct snd_uac2_chip {
+	struct platform_device pdev;
+	struct platform_driver pdrv;
+
+	struct uac2_rtd_params p_prm;
+	struct uac2_rtd_params c_prm;
+
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+};
+
+#define BUFF_SIZE_MAX	(PAGE_SIZE * 16)
+#define PRD_SIZE_MAX	PAGE_SIZE
+#define MIN_PERIODS	4
+
+static struct snd_pcm_hardware uac2_pcm_hardware = {
+	.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER
+		 | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
+		 | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME,
+	.rates = SNDRV_PCM_RATE_CONTINUOUS,
+	.periods_max = BUFF_SIZE_MAX / PRD_SIZE_MAX,
+	.buffer_bytes_max = BUFF_SIZE_MAX,
+	.period_bytes_max = PRD_SIZE_MAX,
+	.periods_min = MIN_PERIODS,
+};
+
+struct audio_dev {
+	/* Currently active {Interface[15:8] | AltSettings[7:0]} */
+	__u16 ac_alt, as_out_alt, as_in_alt;
+
+	struct usb_ep *in_ep, *out_ep;
+	struct usb_function func;
+
+	/* The ALSA Sound Card it represents on the USB-Client side */
+	struct snd_uac2_chip uac2;
+};
+
+static struct audio_dev *agdev_g;
+
+static inline
+struct audio_dev *func_to_agdev(struct usb_function *f)
+{
+	return container_of(f, struct audio_dev, func);
+}
+
+static inline
+struct audio_dev *uac2_to_agdev(struct snd_uac2_chip *u)
+{
+	return container_of(u, struct audio_dev, uac2);
+}
+
+static inline
+struct snd_uac2_chip *pdev_to_uac2(struct platform_device *p)
+{
+	return container_of(p, struct snd_uac2_chip, pdev);
+}
+
+static inline
+struct snd_uac2_chip *prm_to_uac2(struct uac2_rtd_params *r)
+{
+	struct snd_uac2_chip *uac2 = container_of(r,
+					struct snd_uac2_chip, c_prm);
+
+	if (&uac2->c_prm != r)
+		uac2 = container_of(r, struct snd_uac2_chip, p_prm);
+
+	return uac2;
+}
+
+static inline
+uint num_channels(uint chanmask)
+{
+	uint num = 0;
+
+	while (chanmask) {
+		num += (chanmask & 1);
+		chanmask >>= 1;
+	}
+
+	return num;
+}
+
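+/*
+ * ISO completion handler: copies between req->buf and the ALSA DMA area at
+ * hw_ptr, advances hw_ptr modulo dma_bytes, requeues the request, and calls
+ * snd_pcm_period_elapsed() once a full period has accumulated.
+ */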
+static void
+agdev_iso_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	unsigned pending;
+	unsigned long flags;
+	bool update_alsa = false;
+	unsigned char *src, *dst;
+	int status = req->status;
+	struct uac2_req *ur = req->context;
+	struct snd_pcm_substream *substream;
+	struct uac2_rtd_params *prm = ur->pp;
+	struct snd_uac2_chip *uac2 = prm_to_uac2(prm);
+
+	/* i/f shutting down */
+	if (!prm->ep_enabled)
+		return;
+
+	/*
+	 * We can't really do much about bad transfers.
+	 * After all, isochronous transfers can fail legitimately.
+	 */
+	if (status)
+		pr_debug("%s: iso_complete status(%d) %d/%d\n",
+			__func__, status, req->actual, req->length);
+
+	substream = prm->ss;
+
+	/* Do nothing if ALSA isn't active */
+	if (!substream)
+		goto exit;
+
+	spin_lock_irqsave(&prm->lock, flags);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		src = prm->dma_area + prm->hw_ptr;
+		req->actual = req->length;
+		dst = req->buf;
+	} else {
+		dst = prm->dma_area + prm->hw_ptr;
+		src = req->buf;
+	}
+
+	pending = prm->hw_ptr % prm->period_size;
+	pending += req->actual;
+	if (pending >= prm->period_size)
+		update_alsa = true;
+
+	prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
+
+	spin_unlock_irqrestore(&prm->lock, flags);
+
+	/* Pack USB load in ALSA ring buffer */
+	memcpy(dst, src, req->actual);
+exit:
+	if (usb_ep_queue(ep, req, GFP_ATOMIC))
+		dev_err(&uac2->pdev.dev, "%d Error!\n", __LINE__);
+
+	if (update_alsa)
+		snd_pcm_period_elapsed(substream);
+
+	return;
+}
+
+static int
+uac2_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
+	struct audio_dev *agdev = uac2_to_agdev(uac2);
+	struct uac2_rtd_params *prm;
+	unsigned long flags;
+	struct usb_ep *ep;
+	int err = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		ep = agdev->in_ep;
+		prm = &uac2->p_prm;
+	} else {
+		ep = agdev->out_ep;
+		prm = &uac2->c_prm;
+	}
+
+	spin_lock_irqsave(&prm->lock, flags);
+
+	/* Reset */
+	prm->hw_ptr = 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		prm->ss = substream;
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		prm->ss = NULL;
+		break;
+	default:
+		err = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&prm->lock, flags);
+
+	/* Clear buffer after Play stops */
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss)
+		memset(prm->rbuf, 0, prm->max_psize * USB_XFERS);
+
+	return err;
+}
+
+static snd_pcm_uframes_t uac2_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
+	struct uac2_rtd_params *prm;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prm = &uac2->p_prm;
+	else
+		prm = &uac2->c_prm;
+
+	return bytes_to_frames(substream->runtime, prm->hw_ptr);
+}
+
+static int uac2_pcm_hw_params(struct snd_pcm_substream *substream,
+			       struct snd_pcm_hw_params *hw_params)
+{
+	struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
+	struct uac2_rtd_params *prm;
+	int err;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prm = &uac2->p_prm;
+	else
+		prm = &uac2->c_prm;
+
+	err = snd_pcm_lib_malloc_pages(substream,
+					params_buffer_bytes(hw_params));
+	if (err >= 0) {
+		prm->dma_bytes = substream->runtime->dma_bytes;
+		prm->dma_area = substream->runtime->dma_area;
+		prm->period_size = params_period_bytes(hw_params);
+	}
+
+	return err;
+}
+
+static int uac2_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
+	struct uac2_rtd_params *prm;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prm = &uac2->p_prm;
+	else
+		prm = &uac2->c_prm;
+
+	prm->dma_area = NULL;
+	prm->dma_bytes = 0;
+	prm->period_size = 0;
+
+	return snd_pcm_lib_free_pages(substream);
+}
+
+static int uac2_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	runtime->hw = uac2_pcm_hardware;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		spin_lock_init(&uac2->p_prm.lock);
+		runtime->hw.rate_min = p_srate;
+		runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE; /* ! p_ssize ! */
+		runtime->hw.channels_min = num_channels(p_chmask);
+		runtime->hw.period_bytes_min = 2 * uac2->p_prm.max_psize
+						/ runtime->hw.periods_min;
+	} else {
+		spin_lock_init(&uac2->c_prm.lock);
+		runtime->hw.rate_min = c_srate;
+		runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE; /* ! c_ssize ! */
+		runtime->hw.channels_min = num_channels(c_chmask);
+		runtime->hw.period_bytes_min = 2 * uac2->c_prm.max_psize
+						/ runtime->hw.periods_min;
+	}
+
+	runtime->hw.rate_max = runtime->hw.rate_min;
+	runtime->hw.channels_max = runtime->hw.channels_min;
+
+	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+
+	return 0;
+}
+
+/* ALSA cries without these function pointers */
+static int uac2_pcm_null(struct snd_pcm_substream *substream)
+{
+	return 0;
+}
+
+static struct snd_pcm_ops uac2_pcm_ops = {
+	.open = uac2_pcm_open,
+	.close = uac2_pcm_null,
+	.ioctl = snd_pcm_lib_ioctl,
+	.hw_params = uac2_pcm_hw_params,
+	.hw_free = uac2_pcm_hw_free,
+	.trigger = uac2_pcm_trigger,
+	.pointer = uac2_pcm_pointer,
+	.prepare = uac2_pcm_null,
+};
+
+static int __devinit snd_uac2_probe(struct platform_device *pdev)
+{
+	struct snd_uac2_chip *uac2 = pdev_to_uac2(pdev);
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+	int err;
+
+	/* Choose any slot, with no id */
+	err = snd_card_create(-1, NULL, THIS_MODULE, 0, &card);
+	if (err < 0)
+		return err;
+
+	uac2->card = card;
+
+	/*
+	 * Create first PCM device
+	 * Create a substream only for non-zero channel streams
+	 */
+	err = snd_pcm_new(uac2->card, "UAC2 PCM", 0,
+			       p_chmask ? 1 : 0, c_chmask ? 1 : 0, &pcm);
+	if (err < 0)
+		goto snd_fail;
+
+	strcpy(pcm->name, "UAC2 PCM");
+	pcm->private_data = uac2;
+
+	uac2->pcm = pcm;
+
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac2_pcm_ops);
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac2_pcm_ops);
+
+	strcpy(card->driver, "UAC2_Gadget");
+	strcpy(card->shortname, "UAC2_Gadget");
+	sprintf(card->longname, "UAC2_Gadget %i", pdev->id);
+
+	snd_card_set_dev(card, &pdev->dev);
+
+	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+		snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX);
+
+	err = snd_card_register(card);
+	if (!err) {
+		platform_set_drvdata(pdev, card);
+		return 0;
+	}
+
+snd_fail:
+	snd_card_free(card);
+
+	uac2->pcm = NULL;
+	uac2->card = NULL;
+
+	return err;
+}
+
+static int __devexit snd_uac2_remove(struct platform_device *pdev)
+{
+	struct snd_card *card = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	if (card)
+		return snd_card_free(card);
+
+	return 0;
+}
+
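+/*
+ * There is no real audio hardware behind this card, so a dummy platform
+ * device/driver pair is registered just to give ALSA a struct device to
+ * attach the card to; snd_uac2_probe() then creates the card and its PCM
+ * streams.
+ */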
+static int alsa_uac2_init(struct audio_dev *agdev)
+{
+	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	int err;
+
+	uac2->pdrv.probe = snd_uac2_probe;
+	uac2->pdrv.remove = snd_uac2_remove;
+	uac2->pdrv.driver.name = uac2_name;
+
+	uac2->pdev.id = 0;
+	uac2->pdev.name = uac2_name;
+
+	/* Register snd_uac2 driver */
+	err = platform_driver_register(&uac2->pdrv);
+	if (err)
+		return err;
+
+	/* Register snd_uac2 device */
+	err = platform_device_register(&uac2->pdev);
+	if (err)
+		platform_driver_unregister(&uac2->pdrv);
+
+	return err;
+}
+
+static void alsa_uac2_exit(struct audio_dev *agdev)
+{
+	struct snd_uac2_chip *uac2 = &agdev->uac2;
+
+	platform_driver_unregister(&uac2->pdrv);
+	platform_device_unregister(&uac2->pdev);
+}
+
+
+/* --------- USB Function Interface ------------- */
+
+enum {
+	STR_ASSOC,
+	STR_IF_CTRL,
+	STR_CLKSRC_IN,
+	STR_CLKSRC_OUT,
+	STR_USB_IT,
+	STR_IO_IT,
+	STR_USB_OT,
+	STR_IO_OT,
+	STR_AS_OUT_ALT0,
+	STR_AS_OUT_ALT1,
+	STR_AS_IN_ALT0,
+	STR_AS_IN_ALT1,
+};
+
+static const char ifassoc[] = "Source/Sink";
+static const char ifctrl[] = "Topology Control";
+static char clksrc_in[8];
+static char clksrc_out[8];
+static const char usb_it[] = "USBH Out";
+static const char io_it[] = "USBD Out";
+static const char usb_ot[] = "USBH In";
+static const char io_ot[] = "USBD In";
+static const char out_alt0[] = "Playback Inactive";
+static const char out_alt1[] = "Playback Active";
+static const char in_alt0[] = "Capture Inactive";
+static const char in_alt1[] = "Capture Active";
+
+static struct usb_string strings_fn[] = {
+	[STR_ASSOC].s = ifassoc,
+	[STR_IF_CTRL].s = ifctrl,
+	[STR_CLKSRC_IN].s = clksrc_in,
+	[STR_CLKSRC_OUT].s = clksrc_out,
+	[STR_USB_IT].s = usb_it,
+	[STR_IO_IT].s = io_it,
+	[STR_USB_OT].s = usb_ot,
+	[STR_IO_OT].s = io_ot,
+	[STR_AS_OUT_ALT0].s = out_alt0,
+	[STR_AS_OUT_ALT1].s = out_alt1,
+	[STR_AS_IN_ALT0].s = in_alt0,
+	[STR_AS_IN_ALT1].s = in_alt1,
+	{ },
+};
+
+static struct usb_gadget_strings str_fn = {
+	.language = 0x0409,	/* en-us */
+	.strings = strings_fn,
+};
+
+static struct usb_gadget_strings *fn_strings[] = {
+	&str_fn,
+	NULL,
+};
+
+static struct usb_qualifier_descriptor devqual_desc = {
+	.bLength = sizeof devqual_desc,
+	.bDescriptorType = USB_DT_DEVICE_QUALIFIER,
+
+	.bcdUSB = cpu_to_le16(0x200),
+	.bDeviceClass = USB_CLASS_MISC,
+	.bDeviceSubClass = 0x02,
+	.bDeviceProtocol = 0x01,
+	.bNumConfigurations = 1,
+	.bRESERVED = 0,
+};
+
+static struct usb_interface_assoc_descriptor iad_desc = {
+	.bLength = sizeof iad_desc,
+	.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+
+	.bFirstInterface = 0,
+	.bInterfaceCount = 3,
+	.bFunctionClass = USB_CLASS_AUDIO,
+	.bFunctionSubClass = UAC2_FUNCTION_SUBCLASS_UNDEFINED,
+	.bFunctionProtocol = UAC_VERSION_2,
+};
+
+/* Audio Control Interface */
+static struct usb_interface_descriptor std_ac_if_desc = {
+	.bLength = sizeof std_ac_if_desc,
+	.bDescriptorType = USB_DT_INTERFACE,
+
+	.bAlternateSetting = 0,
+	.bNumEndpoints = 0,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+	.bInterfaceProtocol = UAC_VERSION_2,
+};
+
+/* Clock source for IN traffic */
+struct uac_clock_source_descriptor in_clk_src_desc = {
+	.bLength = sizeof in_clk_src_desc,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+
+	.bDescriptorSubtype = UAC2_CLOCK_SOURCE,
+	.bClockID = USB_IN_CLK_ID,
+	.bmAttributes = UAC_CLOCK_SOURCE_TYPE_INT_FIXED,
+	.bmControls = (CONTROL_RDONLY << CLK_FREQ_CTRL),
+	.bAssocTerminal = 0,
+};
+
+/* Clock source for OUT traffic */
+struct uac_clock_source_descriptor out_clk_src_desc = {
+	.bLength = sizeof out_clk_src_desc,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+
+	.bDescriptorSubtype = UAC2_CLOCK_SOURCE,
+	.bClockID = USB_OUT_CLK_ID,
+	.bmAttributes = UAC_CLOCK_SOURCE_TYPE_INT_FIXED,
+	.bmControls = (CONTROL_RDONLY << CLK_FREQ_CTRL),
+	.bAssocTerminal = 0,
+};
+
+/* Input Terminal for USB_OUT */
+struct uac2_input_terminal_descriptor usb_out_it_desc = {
+	.bLength = sizeof usb_out_it_desc,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+
+	.bDescriptorSubtype = UAC_INPUT_TERMINAL,
+	.bTerminalID = USB_OUT_IT_ID,
+	.wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
+	.bAssocTerminal = 0,
+	.bCSourceID = USB_OUT_CLK_ID,
+	.iChannelNames = 0,
+	.bmControls = (CONTROL_RDWR << COPY_CTRL),
+};
+
+/* Input Terminal for I/O-In */
+struct uac2_input_terminal_descriptor io_in_it_desc = {
+	.bLength = sizeof io_in_it_desc,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+
+	.bDescriptorSubtype = UAC_INPUT_TERMINAL,
+	.bTerminalID = IO_IN_IT_ID,
+	.wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED),
+	.bAssocTerminal = 0,
+	.bCSourceID = USB_IN_CLK_ID,
+	.iChannelNames = 0,
+	.bmControls = (CONTROL_RDWR << COPY_CTRL),
+};
+
+/* Output Terminal for USB_IN */
+struct uac2_output_terminal_descriptor usb_in_ot_desc = {
+	.bLength = sizeof usb_in_ot_desc,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+
+	.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+	.bTerminalID = USB_IN_OT_ID,
+	.wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
+	.bAssocTerminal = 0,
+	.bSourceID = IO_IN_IT_ID,
+	.bCSourceID = USB_IN_CLK_ID,
+	.bmControls = (CONTROL_RDWR << COPY_CTRL),
+};
+
+/* Output Terminal for I/O-Out */
+struct uac2_output_terminal_descriptor io_out_ot_desc = {
+	.bLength = sizeof io_out_ot_desc,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+
+	.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+	.bTerminalID = IO_OUT_OT_ID,
+	.wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED),
+	.bAssocTerminal = 0,
+	.bSourceID = USB_OUT_IT_ID,
+	.bCSourceID = USB_OUT_CLK_ID,
+	.bmControls = (CONTROL_RDWR << COPY_CTRL),
+};
+
+struct uac2_ac_header_descriptor ac_hdr_desc = {
+	.bLength = sizeof ac_hdr_desc,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+
+	.bDescriptorSubtype = UAC_MS_HEADER,
+	.bcdADC = cpu_to_le16(0x200),
+	.bCategory = UAC2_FUNCTION_IO_BOX,
+	.wTotalLength = sizeof in_clk_src_desc + sizeof out_clk_src_desc
+			 + sizeof usb_out_it_desc + sizeof io_in_it_desc
+			+ sizeof usb_in_ot_desc + sizeof io_out_ot_desc,
+	.bmControls = 0,
+};
+
+/* Audio Streaming OUT Interface - Alt0 */
+static struct usb_interface_descriptor std_as_out_if0_desc = {
+	.bLength = sizeof std_as_out_if0_desc,
+	.bDescriptorType = USB_DT_INTERFACE,
+
+	.bAlternateSetting = 0,
+	.bNumEndpoints = 0,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+	.bInterfaceProtocol = UAC_VERSION_2,
+};
+
+/* Audio Streaming OUT Interface - Alt1 */
+static struct usb_interface_descriptor std_as_out_if1_desc = {
+	.bLength = sizeof std_as_out_if1_desc,
+	.bDescriptorType = USB_DT_INTERFACE,
+
+	.bAlternateSetting = 1,
+	.bNumEndpoints = 1,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+	.bInterfaceProtocol = UAC_VERSION_2,
+};
+
+/* Audio Streaming OUT Interface Descriptor */
+struct uac2_as_header_descriptor as_out_hdr_desc = {
+	.bLength = sizeof as_out_hdr_desc,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+
+	.bDescriptorSubtype = UAC_AS_GENERAL,
+	.bTerminalLink = USB_OUT_IT_ID,
+	.bmControls = 0,
+	.bFormatType = UAC_FORMAT_TYPE_I,
+	.bmFormats = cpu_to_le32(UAC_FORMAT_TYPE_I_PCM),
+	.iChannelNames = 0,
+};
+
+/* Audio USB_OUT Format */
+struct uac2_format_type_i_descriptor as_out_fmt1_desc = {
+	.bLength = sizeof as_out_fmt1_desc,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_FORMAT_TYPE,
+	.bFormatType = UAC_FORMAT_TYPE_I,
+};
+
+/* STD AS ISO OUT Endpoint */
+struct usb_endpoint_descriptor fs_epout_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_OUT,
+	.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
+	.bInterval = 1,
+};
+
+struct usb_endpoint_descriptor hs_epout_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
+	.bInterval = 4,
+};
+
+/* CS AS ISO OUT Endpoint */
+static struct uac2_iso_endpoint_descriptor as_iso_out_desc = {
+	.bLength = sizeof as_iso_out_desc,
+	.bDescriptorType = USB_DT_CS_ENDPOINT,
+
+	.bDescriptorSubtype = UAC_EP_GENERAL,
+	.bmAttributes = 0,
+	.bmControls = 0,
+	.bLockDelayUnits = 0,
+	.wLockDelay = 0,
+};
+
+/* Audio Streaming IN Interface - Alt0 */
+static struct usb_interface_descriptor std_as_in_if0_desc = {
+	.bLength = sizeof std_as_in_if0_desc,
+	.bDescriptorType = USB_DT_INTERFACE,
+
+	.bAlternateSetting = 0,
+	.bNumEndpoints = 0,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+	.bInterfaceProtocol = UAC_VERSION_2,
+};
+
+/* Audio Streaming IN Interface - Alt1 */
+static struct usb_interface_descriptor std_as_in_if1_desc = {
+	.bLength = sizeof std_as_in_if1_desc,
+	.bDescriptorType = USB_DT_INTERFACE,
+
+	.bAlternateSetting = 1,
+	.bNumEndpoints = 1,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+	.bInterfaceProtocol = UAC_VERSION_2,
+};
+
+/* Audio Streaming IN Interface Descriptor */
+struct uac2_as_header_descriptor as_in_hdr_desc = {
+	.bLength = sizeof as_in_hdr_desc,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+
+	.bDescriptorSubtype = UAC_AS_GENERAL,
+	.bTerminalLink = USB_IN_OT_ID,
+	.bmControls = 0,
+	.bFormatType = UAC_FORMAT_TYPE_I,
+	.bmFormats = cpu_to_le32(UAC_FORMAT_TYPE_I_PCM),
+	.iChannelNames = 0,
+};
+
+/* Audio USB_IN Format */
+struct uac2_format_type_i_descriptor as_in_fmt1_desc = {
+	.bLength = sizeof as_in_fmt1_desc,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_FORMAT_TYPE,
+	.bFormatType = UAC_FORMAT_TYPE_I,
+};
+
+/* STD AS ISO IN Endpoint */
+struct usb_endpoint_descriptor fs_epin_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
+	.bInterval = 1,
+};
+
+struct usb_endpoint_descriptor hs_epin_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
+	.bInterval = 4,
+};
+
+/* CS AS ISO IN Endpoint */
+static struct uac2_iso_endpoint_descriptor as_iso_in_desc = {
+	.bLength = sizeof as_iso_in_desc,
+	.bDescriptorType = USB_DT_CS_ENDPOINT,
+
+	.bDescriptorSubtype = UAC_EP_GENERAL,
+	.bmAttributes = 0,
+	.bmControls = 0,
+	.bLockDelayUnits = 0,
+	.wLockDelay = 0,
+};
+
+static struct usb_descriptor_header *fs_audio_desc[] = {
+	(struct usb_descriptor_header *)&iad_desc,
+	(struct usb_descriptor_header *)&std_ac_if_desc,
+
+	(struct usb_descriptor_header *)&ac_hdr_desc,
+	(struct usb_descriptor_header *)&in_clk_src_desc,
+	(struct usb_descriptor_header *)&out_clk_src_desc,
+	(struct usb_descriptor_header *)&usb_out_it_desc,
+	(struct usb_descriptor_header *)&io_in_it_desc,
+	(struct usb_descriptor_header *)&usb_in_ot_desc,
+	(struct usb_descriptor_header *)&io_out_ot_desc,
+
+	(struct usb_descriptor_header *)&std_as_out_if0_desc,
+	(struct usb_descriptor_header *)&std_as_out_if1_desc,
+
+	(struct usb_descriptor_header *)&as_out_hdr_desc,
+	(struct usb_descriptor_header *)&as_out_fmt1_desc,
+	(struct usb_descriptor_header *)&fs_epout_desc,
+	(struct usb_descriptor_header *)&as_iso_out_desc,
+
+	(struct usb_descriptor_header *)&std_as_in_if0_desc,
+	(struct usb_descriptor_header *)&std_as_in_if1_desc,
+
+	(struct usb_descriptor_header *)&as_in_hdr_desc,
+	(struct usb_descriptor_header *)&as_in_fmt1_desc,
+	(struct usb_descriptor_header *)&fs_epin_desc,
+	(struct usb_descriptor_header *)&as_iso_in_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_audio_desc[] = {
+	(struct usb_descriptor_header *)&iad_desc,
+	(struct usb_descriptor_header *)&std_ac_if_desc,
+
+	(struct usb_descriptor_header *)&ac_hdr_desc,
+	(struct usb_descriptor_header *)&in_clk_src_desc,
+	(struct usb_descriptor_header *)&out_clk_src_desc,
+	(struct usb_descriptor_header *)&usb_out_it_desc,
+	(struct usb_descriptor_header *)&io_in_it_desc,
+	(struct usb_descriptor_header *)&usb_in_ot_desc,
+	(struct usb_descriptor_header *)&io_out_ot_desc,
+
+	(struct usb_descriptor_header *)&std_as_out_if0_desc,
+	(struct usb_descriptor_header *)&std_as_out_if1_desc,
+
+	(struct usb_descriptor_header *)&as_out_hdr_desc,
+	(struct usb_descriptor_header *)&as_out_fmt1_desc,
+	(struct usb_descriptor_header *)&hs_epout_desc,
+	(struct usb_descriptor_header *)&as_iso_out_desc,
+
+	(struct usb_descriptor_header *)&std_as_in_if0_desc,
+	(struct usb_descriptor_header *)&std_as_in_if1_desc,
+
+	(struct usb_descriptor_header *)&as_in_hdr_desc,
+	(struct usb_descriptor_header *)&as_in_fmt1_desc,
+	(struct usb_descriptor_header *)&hs_epin_desc,
+	(struct usb_descriptor_header *)&as_iso_in_desc,
+	NULL,
+};
+
+struct cntrl_cur_lay3 {
+	__u32	dCUR;
+};
+
+struct cntrl_range_lay3 {
+	__u16	wNumSubRanges;
+	__u32	dMIN;
+	__u32	dMAX;
+	__u32	dRES;
+} __packed;
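+/*
+ * These mirror the UAC2 "layout 3" CUR and RANGE parameter blocks used for
+ * 32-bit controls such as the sampling frequency (see the class spec,
+ * section 5.2.3); only a single sub-range is ever reported here.
+ */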
+
+static inline void
+free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep)
+{
+	struct snd_uac2_chip *uac2 = prm_to_uac2(prm);
+	int i;
+
+	prm->ep_enabled = false;
+
+	for (i = 0; i < USB_XFERS; i++) {
+		if (prm->ureq[i].req) {
+			usb_ep_dequeue(ep, prm->ureq[i].req);
+			usb_ep_free_request(ep, prm->ureq[i].req);
+			prm->ureq[i].req = NULL;
+		}
+	}
+
+	if (usb_ep_disable(ep))
+		dev_err(&uac2->pdev.dev,
+			"%s:%d Error!\n", __func__, __LINE__);
+}
+
+static int __init
+afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
+{
+	struct audio_dev *agdev = func_to_agdev(fn);
+	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	struct usb_composite_dev *cdev = cfg->cdev;
+	struct usb_gadget *gadget = cdev->gadget;
+	struct uac2_rtd_params *prm;
+	int ret;
+
+	ret = usb_interface_id(cfg, fn);
+	if (ret < 0) {
+		dev_err(&uac2->pdev.dev,
+			"%s:%d Error!\n", __func__, __LINE__);
+		return ret;
+	}
+	std_ac_if_desc.bInterfaceNumber = ret;
+	ALT_SET(agdev->ac_alt, 0);
+	INTF_SET(agdev->ac_alt, ret);
+
+	ret = usb_interface_id(cfg, fn);
+	if (ret < 0) {
+		dev_err(&uac2->pdev.dev,
+			"%s:%d Error!\n", __func__, __LINE__);
+		return ret;
+	}
+	std_as_out_if0_desc.bInterfaceNumber = ret;
+	std_as_out_if1_desc.bInterfaceNumber = ret;
+	ALT_SET(agdev->as_out_alt, 0);
+	INTF_SET(agdev->as_out_alt, ret);
+
+	ret = usb_interface_id(cfg, fn);
+	if (ret < 0) {
+		dev_err(&uac2->pdev.dev,
+			"%s:%d Error!\n", __func__, __LINE__);
+		return ret;
+	}
+	std_as_in_if0_desc.bInterfaceNumber = ret;
+	std_as_in_if1_desc.bInterfaceNumber = ret;
+	ALT_SET(agdev->as_in_alt, 0);
+	INTF_SET(agdev->as_in_alt, ret);
+
+	agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
+	if (!agdev->out_ep)
+		dev_err(&uac2->pdev.dev,
+			"%s:%d Error!\n", __func__, __LINE__);
+	agdev->out_ep->driver_data = agdev;
+
+	agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
+	if (!agdev->in_ep)
+		dev_err(&uac2->pdev.dev,
+			"%s:%d Error!\n", __func__, __LINE__);
+	agdev->in_ep->driver_data = agdev;
+
+	hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
+	hs_epout_desc.wMaxPacketSize = fs_epout_desc.wMaxPacketSize;
+	hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
+	hs_epin_desc.wMaxPacketSize = fs_epin_desc.wMaxPacketSize;
+
+	fn->descriptors = usb_copy_descriptors(fs_audio_desc);
+	if (gadget_is_dualspeed(gadget))
+		fn->hs_descriptors = usb_copy_descriptors(hs_audio_desc);
+
+	prm = &agdev->uac2.c_prm;
+	prm->max_psize = hs_epout_desc.wMaxPacketSize;
+	prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
+	if (!prm->rbuf) {
+		prm->max_psize = 0;
+		dev_err(&uac2->pdev.dev,
+			"%s:%d Error!\n", __func__, __LINE__);
+	}
+
+	prm = &agdev->uac2.p_prm;
+	prm->max_psize = hs_epin_desc.wMaxPacketSize;
+	prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
+	if (!prm->rbuf) {
+		prm->max_psize = 0;
+		dev_err(&uac2->pdev.dev,
+			"%s:%d Error!\n", __func__, __LINE__);
+	}
+
+	return alsa_uac2_init(agdev);
+}
+
+static void
+afunc_unbind(struct usb_configuration *cfg, struct usb_function *fn)
+{
+	struct audio_dev *agdev = func_to_agdev(fn);
+	struct usb_composite_dev *cdev = cfg->cdev;
+	struct usb_gadget *gadget = cdev->gadget;
+	struct uac2_rtd_params *prm;
+
+	alsa_uac2_exit(agdev);
+
+	prm = &agdev->uac2.p_prm;
+	kfree(prm->rbuf);
+
+	prm = &agdev->uac2.c_prm;
+	kfree(prm->rbuf);
+
+	if (gadget_is_dualspeed(gadget))
+		usb_free_descriptors(fn->hs_descriptors);
+	usb_free_descriptors(fn->descriptors);
+
+	if (agdev->in_ep)
+		agdev->in_ep->driver_data = NULL;
+	if (agdev->out_ep)
+		agdev->out_ep->driver_data = NULL;
+}
+
+static int
+afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
+{
+	struct usb_composite_dev *cdev = fn->config->cdev;
+	struct audio_dev *agdev = func_to_agdev(fn);
+	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	struct usb_gadget *gadget = cdev->gadget;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	struct uac2_rtd_params *prm;
+	int i;
+
+	/* No i/f has more than 2 alt settings */
+	if (alt > 1) {
+		dev_err(&uac2->pdev.dev,
+			"%s:%d Error!\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (intf == INTF_GET(agdev->ac_alt)) {
+		/* Control I/f has only 1 AltSetting - 0 */
+		if (alt) {
+			dev_err(&uac2->pdev.dev,
+				"%s:%d Error!\n", __func__, __LINE__);
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	if (intf == INTF_GET(agdev->as_out_alt)) {
+		ep = agdev->out_ep;
+		prm = &uac2->c_prm;
+		config_ep_by_speed(gadget, fn, ep);
+		ALT_SET(agdev->as_out_alt, alt);
+	} else if (intf == INTF_GET(agdev->as_in_alt)) {
+		ep = agdev->in_ep;
+		prm = &uac2->p_prm;
+		config_ep_by_speed(gadget, fn, ep);
+		ALT_SET(agdev->as_in_alt, alt);
+	} else {
+		dev_err(&uac2->pdev.dev,
+			"%s:%d Error!\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (alt == 0) {
+		free_ep(prm, ep);
+		return 0;
+	}
+
+	prm->ep_enabled = true;
+	usb_ep_enable(ep);
+
+	for (i = 0; i < USB_XFERS; i++) {
+		if (prm->ureq[i].req) {
+			if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
+				dev_err(&uac2->pdev.dev, "%d Error!\n",
+					__LINE__);
+			continue;
+		}
+
+		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+		if (req == NULL) {
+			dev_err(&uac2->pdev.dev,
+				"%s:%d Error!\n", __func__, __LINE__);
+			return -EINVAL;
+		}
+
+		prm->ureq[i].req = req;
+		prm->ureq[i].pp = prm;
+
+		req->zero = 0;
+		req->dma = DMA_ADDR_INVALID;
+		req->context = &prm->ureq[i];
+		req->length = prm->max_psize;
+		req->complete =	agdev_iso_complete;
+		req->buf = prm->rbuf + i * req->length;
+
+		if (usb_ep_queue(ep, req, GFP_ATOMIC))
+			dev_err(&uac2->pdev.dev, "%d Error!\n", __LINE__);
+	}
+
+	return 0;
+}
+
+static int
+afunc_get_alt(struct usb_function *fn, unsigned intf)
+{
+	struct audio_dev *agdev = func_to_agdev(fn);
+	struct snd_uac2_chip *uac2 = &agdev->uac2;
+
+	if (intf == INTF_GET(agdev->ac_alt))
+		return ALT_GET(agdev->ac_alt);
+	else if (intf == INTF_GET(agdev->as_out_alt))
+		return ALT_GET(agdev->as_out_alt);
+	else if (intf == INTF_GET(agdev->as_in_alt))
+		return ALT_GET(agdev->as_in_alt);
+	else
+		dev_err(&uac2->pdev.dev,
+			"%s:%d Invalid Interface %d!\n",
+			__func__, __LINE__, intf);
+
+	return -EINVAL;
+}
+
+static void
+afunc_disable(struct usb_function *fn)
+{
+	struct audio_dev *agdev = func_to_agdev(fn);
+	struct snd_uac2_chip *uac2 = &agdev->uac2;
+
+	free_ep(&uac2->p_prm, agdev->in_ep);
+	ALT_SET(agdev->as_in_alt, 0);
+
+	free_ep(&uac2->c_prm, agdev->out_ep);
+	ALT_SET(agdev->as_out_alt, 0);
+}
+
+static int
+in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
+{
+	struct usb_request *req = fn->config->cdev->req;
+	struct audio_dev *agdev = func_to_agdev(fn);
+	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	u16 w_length = le16_to_cpu(cr->wLength);
+	u16 w_index = le16_to_cpu(cr->wIndex);
+	u16 w_value = le16_to_cpu(cr->wValue);
+	u8 entity_id = (w_index >> 8) & 0xff;
+	u8 control_selector = w_value >> 8;
+	int value = -EOPNOTSUPP;
+
+	if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
+		struct cntrl_cur_lay3 c;
+
+		if (entity_id == USB_IN_CLK_ID)
+			c.dCUR = p_srate;
+		else if (entity_id == USB_OUT_CLK_ID)
+			c.dCUR = c_srate;
+
+		value = min_t(unsigned, w_length, sizeof c);
+		memcpy(req->buf, &c, value);
+	} else if (control_selector == UAC2_CS_CONTROL_CLOCK_VALID) {
+		*(u8 *)req->buf = 1;
+		value = min_t(unsigned, w_length, 1);
+	} else {
+		dev_err(&uac2->pdev.dev,
+			"%s:%d control_selector=%d TODO!\n",
+			__func__, __LINE__, control_selector);
+	}
+
+	return value;
+}
+
+static int
+in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
+{
+	struct usb_request *req = fn->config->cdev->req;
+	struct audio_dev *agdev = func_to_agdev(fn);
+	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	u16 w_length = le16_to_cpu(cr->wLength);
+	u16 w_index = le16_to_cpu(cr->wIndex);
+	u16 w_value = le16_to_cpu(cr->wValue);
+	u8 entity_id = (w_index >> 8) & 0xff;
+	u8 control_selector = w_value >> 8;
+	struct cntrl_range_lay3 r;
+	int value = -EOPNOTSUPP;
+
+	if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
+		if (entity_id == USB_IN_CLK_ID)
+			r.dMIN = p_srate;
+		else if (entity_id == USB_OUT_CLK_ID)
+			r.dMIN = c_srate;
+		else
+			return -EOPNOTSUPP;
+
+		r.dMAX = r.dMIN;
+		r.dRES = 0;
+		r.wNumSubRanges = 1;
+
+		value = min_t(unsigned, w_length, sizeof r);
+		memcpy(req->buf, &r, value);
+	} else {
+		dev_err(&uac2->pdev.dev,
+			"%s:%d control_selector=%d TODO!\n",
+			__func__, __LINE__, control_selector);
+	}
+
+	return value;
+}
+
+static int
+ac_rq_in(struct usb_function *fn, const struct usb_ctrlrequest *cr)
+{
+	if (cr->bRequest == UAC2_CS_CUR)
+		return in_rq_cur(fn, cr);
+	else if (cr->bRequest == UAC2_CS_RANGE)
+		return in_rq_range(fn, cr);
+	else
+		return -EOPNOTSUPP;
+}
+
+static int
+out_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
+{
+	u16 w_length = le16_to_cpu(cr->wLength);
+	u16 w_value = le16_to_cpu(cr->wValue);
+	u8 control_selector = w_value >> 8;
+
+	if (control_selector == UAC2_CS_CONTROL_SAM_FREQ)
+		return w_length;
+
+	return -EOPNOTSUPP;
+}
+
+static int
+setup_rq_inf(struct usb_function *fn, const struct usb_ctrlrequest *cr)
+{
+	struct audio_dev *agdev = func_to_agdev(fn);
+	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	u16 w_index = le16_to_cpu(cr->wIndex);
+	u8 intf = w_index & 0xff;
+
+	if (intf != INTF_GET(agdev->ac_alt)) {
+		dev_err(&uac2->pdev.dev,
+			"%s:%d Error!\n", __func__, __LINE__);
+		return -EOPNOTSUPP;
+	}
+
+	if (cr->bRequestType & USB_DIR_IN)
+		return ac_rq_in(fn, cr);
+	else if (cr->bRequest == UAC2_CS_CUR)
+		return out_rq_cur(fn, cr);
+
+	return -EOPNOTSUPP;
+}
+
+static int
+afunc_setup(struct usb_function *fn, const struct usb_ctrlrequest *cr)
+{
+	struct usb_composite_dev *cdev = fn->config->cdev;
+	struct audio_dev *agdev = func_to_agdev(fn);
+	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	struct usb_request *req = cdev->req;
+	u16 w_length = le16_to_cpu(cr->wLength);
+	int value = -EOPNOTSUPP;
+
+	/* Only Class specific requests are supposed to reach here */
+	if ((cr->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS)
+		return -EOPNOTSUPP;
+
+	if ((cr->bRequestType & USB_RECIP_MASK) == USB_RECIP_INTERFACE)
+		value = setup_rq_inf(fn, cr);
+	else
+		dev_err(&uac2->pdev.dev, "%s:%d Error!\n", __func__, __LINE__);
+
+	if (value >= 0) {
+		req->length = value;
+		req->zero = value < w_length;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0) {
+			dev_err(&uac2->pdev.dev,
+				"%s:%d Error!\n", __func__, __LINE__);
+			req->status = 0;
+		}
+	}
+
+	return value;
+}
+
+static int audio_bind_config(struct usb_configuration *cfg)
+{
+	int id, res;
+
+	agdev_g = kzalloc(sizeof *agdev_g, GFP_KERNEL);
+	if (agdev_g == NULL) {
+		printk(KERN_ERR "Unable to allocate audio gadget\n");
+		return -ENOMEM;
+	}
+
+	id = usb_string_id(cfg->cdev);
+	if (id < 0)
+		return id;
+
+	strings_fn[STR_ASSOC].id = id;
+	iad_desc.iFunction = id;
+
+	id = usb_string_id(cfg->cdev);
+	if (id < 0)
+		return id;
+
+	strings_fn[STR_IF_CTRL].id = id;
+	std_ac_if_desc.iInterface = id;
+
+	id = usb_string_id(cfg->cdev);
+	if (id < 0)
+		return id;
+
+	strings_fn[STR_CLKSRC_IN].id = id;
+	in_clk_src_desc.iClockSource = id;
+
+	id = usb_string_id(cfg->cdev);
+	if (id < 0)
+		return id;
+
+	strings_fn[STR_CLKSRC_OUT].id = id;
+	out_clk_src_desc.iClockSource = id;
+
+	id = usb_string_id(cfg->cdev);
+	if (id < 0)
+		return id;
+
+	strings_fn[STR_USB_IT].id = id;
+	usb_out_it_desc.iTerminal = id;
+
+	id = usb_string_id(cfg->cdev);
+	if (id < 0)
+		return id;
+
+	strings_fn[STR_IO_IT].id = id;
+	io_in_it_desc.iTerminal = id;
+
+	id = usb_string_id(cfg->cdev);
+	if (id < 0)
+		return id;
+
+	strings_fn[STR_USB_OT].id = id;
+	usb_in_ot_desc.iTerminal = id;
+
+	id = usb_string_id(cfg->cdev);
+	if (id < 0)
+		return id;
+
+	strings_fn[STR_IO_OT].id = id;
+	io_out_ot_desc.iTerminal = id;
+
+	id = usb_string_id(cfg->cdev);
+	if (id < 0)
+		return id;
+
+	strings_fn[STR_AS_OUT_ALT0].id = id;
+	std_as_out_if0_desc.iInterface = id;
+
+	id = usb_string_id(cfg->cdev);
+	if (id < 0)
+		return id;
+
+	strings_fn[STR_AS_OUT_ALT1].id = id;
+	std_as_out_if1_desc.iInterface = id;
+
+	id = usb_string_id(cfg->cdev);
+	if (id < 0)
+		return id;
+
+	strings_fn[STR_AS_IN_ALT0].id = id;
+	std_as_in_if0_desc.iInterface = id;
+
+	id = usb_string_id(cfg->cdev);
+	if (id < 0)
+		return id;
+
+	strings_fn[STR_AS_IN_ALT1].id = id;
+	std_as_in_if1_desc.iInterface = id;
+
+	agdev_g->func.name = "uac2_func";
+	agdev_g->func.strings = fn_strings;
+	agdev_g->func.bind = afunc_bind;
+	agdev_g->func.unbind = afunc_unbind;
+	agdev_g->func.set_alt = afunc_set_alt;
+	agdev_g->func.get_alt = afunc_get_alt;
+	agdev_g->func.disable = afunc_disable;
+	agdev_g->func.setup = afunc_setup;
+
+	/* Initialize the configurable parameters */
+	usb_out_it_desc.bNrChannels = num_channels(c_chmask);
+	usb_out_it_desc.bmChannelConfig = cpu_to_le32(c_chmask);
+	io_in_it_desc.bNrChannels = num_channels(p_chmask);
+	io_in_it_desc.bmChannelConfig = cpu_to_le32(p_chmask);
+	as_out_hdr_desc.bNrChannels = num_channels(c_chmask);
+	as_out_hdr_desc.bmChannelConfig = cpu_to_le32(c_chmask);
+	as_in_hdr_desc.bNrChannels = num_channels(p_chmask);
+	as_in_hdr_desc.bmChannelConfig = cpu_to_le32(p_chmask);
+	as_out_fmt1_desc.bSubslotSize = c_ssize;
+	as_out_fmt1_desc.bBitResolution = c_ssize * 8;
+	as_in_fmt1_desc.bSubslotSize = p_ssize;
+	as_in_fmt1_desc.bBitResolution = p_ssize * 8;
+
+	snprintf(clksrc_in, sizeof(clksrc_in), "%uHz", p_srate);
+	snprintf(clksrc_out, sizeof(clksrc_out), "%uHz", c_srate);
+
+	res = usb_add_function(cfg, &agdev_g->func);
+	if (res < 0)
+		kfree(agdev_g);
+
+	return res;
+}
+
+static void
+uac2_unbind_config(struct usb_configuration *cfg)
+{
+	kfree(agdev_g);
+	agdev_g = NULL;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_uvc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_uvc.c
new file mode 100644
index 0000000..a0abc65
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_uvc.c
@@ -0,0 +1,665 @@
+/*
+ *	f_uvc.c  --  USB Video Class Gadget driver
+ *
+ *	Copyright (C) 2009-2010
+ *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/video.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-event.h>
+
+#include "uvc.h"
+
+unsigned int uvc_gadget_trace_param;
+
+/* --------------------------------------------------------------------------
+ * Function descriptors
+ */
+
+/* string IDs are assigned dynamically */
+
+#define UVC_STRING_ASSOCIATION_IDX		0
+#define UVC_STRING_CONTROL_IDX			1
+#define UVC_STRING_STREAMING_IDX		2
+
+static struct usb_string uvc_en_us_strings[] = {
+	[UVC_STRING_ASSOCIATION_IDX].s = "UVC Camera",
+	[UVC_STRING_CONTROL_IDX].s = "Video Control",
+	[UVC_STRING_STREAMING_IDX].s = "Video Streaming",
+	{  }
+};
+
+static struct usb_gadget_strings uvc_stringtab = {
+	.language = 0x0409,	/* en-us */
+	.strings = uvc_en_us_strings,
+};
+
+static struct usb_gadget_strings *uvc_function_strings[] = {
+	&uvc_stringtab,
+	NULL,
+};
+
+#define UVC_INTF_VIDEO_CONTROL			0
+#define UVC_INTF_VIDEO_STREAMING		1
+
+static struct usb_interface_assoc_descriptor uvc_iad __initdata = {
+	.bLength		= sizeof(uvc_iad),
+	.bDescriptorType	= USB_DT_INTERFACE_ASSOCIATION,
+	.bFirstInterface	= 0,
+	.bInterfaceCount	= 2,
+	.bFunctionClass		= USB_CLASS_VIDEO,
+	.bFunctionSubClass	= UVC_SC_VIDEO_INTERFACE_COLLECTION,
+	.bFunctionProtocol	= 0x00,
+	.iFunction		= 0,
+};
+
+static struct usb_interface_descriptor uvc_control_intf __initdata = {
+	.bLength		= USB_DT_INTERFACE_SIZE,
+	.bDescriptorType	= USB_DT_INTERFACE,
+	.bInterfaceNumber	= UVC_INTF_VIDEO_CONTROL,
+	.bAlternateSetting	= 0,
+	.bNumEndpoints		= 1,
+	.bInterfaceClass	= USB_CLASS_VIDEO,
+	.bInterfaceSubClass	= UVC_SC_VIDEOCONTROL,
+	.bInterfaceProtocol	= 0x00,
+	.iInterface		= 0,
+};
+
+static struct usb_endpoint_descriptor uvc_control_ep __initdata = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+	.bEndpointAddress	= USB_DIR_IN,
+	.bmAttributes		= USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize		= cpu_to_le16(16),
+	.bInterval		= 8,
+};
+
+static struct uvc_control_endpoint_descriptor uvc_control_cs_ep __initdata = {
+	.bLength		= UVC_DT_CONTROL_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_CS_ENDPOINT,
+	.bDescriptorSubType	= UVC_EP_INTERRUPT,
+	.wMaxTransferSize	= cpu_to_le16(16),
+};
+
+static struct usb_interface_descriptor uvc_streaming_intf_alt0 __initdata = {
+	.bLength		= USB_DT_INTERFACE_SIZE,
+	.bDescriptorType	= USB_DT_INTERFACE,
+	.bInterfaceNumber	= UVC_INTF_VIDEO_STREAMING,
+	.bAlternateSetting	= 0,
+	.bNumEndpoints		= 0,
+	.bInterfaceClass	= USB_CLASS_VIDEO,
+	.bInterfaceSubClass	= UVC_SC_VIDEOSTREAMING,
+	.bInterfaceProtocol	= 0x00,
+	.iInterface		= 0,
+};
+
+static struct usb_interface_descriptor uvc_streaming_intf_alt1 __initdata = {
+	.bLength		= USB_DT_INTERFACE_SIZE,
+	.bDescriptorType	= USB_DT_INTERFACE,
+	.bInterfaceNumber	= UVC_INTF_VIDEO_STREAMING,
+	.bAlternateSetting	= 1,
+	.bNumEndpoints		= 1,
+	.bInterfaceClass	= USB_CLASS_VIDEO,
+	.bInterfaceSubClass	= UVC_SC_VIDEOSTREAMING,
+	.bInterfaceProtocol	= 0x00,
+	.iInterface		= 0,
+};
+
+static struct usb_endpoint_descriptor uvc_streaming_ep = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+	.bEndpointAddress	= USB_DIR_IN,
+	.bmAttributes		= USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize		= cpu_to_le16(512),
+	.bInterval		= 1,
+};
+
+static const struct usb_descriptor_header * const uvc_fs_streaming[] = {
+	(struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
+	(struct usb_descriptor_header *) &uvc_streaming_ep,
+	NULL,
+};
+
+static const struct usb_descriptor_header * const uvc_hs_streaming[] = {
+	(struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
+	(struct usb_descriptor_header *) &uvc_streaming_ep,
+	NULL,
+};
+
+/* --------------------------------------------------------------------------
+ * Control requests
+ */
+
+static void
+uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct uvc_device *uvc = req->context;
+	struct v4l2_event v4l2_event;
+	struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
+
+	if (uvc->event_setup_out) {
+		uvc->event_setup_out = 0;
+
+		memset(&v4l2_event, 0, sizeof(v4l2_event));
+		v4l2_event.type = UVC_EVENT_DATA;
+		uvc_event->data.length = req->actual;
+		memcpy(&uvc_event->data.data, req->buf, req->actual);
+		v4l2_event_queue(uvc->vdev, &v4l2_event);
+	}
+}
+
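+/*
+ * Class-specific requests are not answered in the kernel: uvc_function_setup()
+ * only forwards them to the userspace UVC server as UVC_EVENT_SETUP events,
+ * and the server provides the reply data through the video node (see the
+ * UVCIOC_SEND_RESPONSE handling in uvc_v4l2.c).
+ */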
+static int
+uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct uvc_device *uvc = to_uvc(f);
+	struct v4l2_event v4l2_event;
+	struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
+
+	/* printk(KERN_INFO "setup request %02x %02x value %04x index %04x %04x\n",
+	 *	ctrl->bRequestType, ctrl->bRequest, le16_to_cpu(ctrl->wValue),
+	 *	le16_to_cpu(ctrl->wIndex), le16_to_cpu(ctrl->wLength));
+	 */
+
+	if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) {
+		INFO(f->config->cdev, "invalid request type\n");
+		return -EINVAL;
+	}
+
+	/* Stall too big requests. */
+	if (le16_to_cpu(ctrl->wLength) > UVC_MAX_REQUEST_SIZE)
+		return -EINVAL;
+
+	memset(&v4l2_event, 0, sizeof(v4l2_event));
+	v4l2_event.type = UVC_EVENT_SETUP;
+	memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
+	v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+	return 0;
+}
+
+static int
+uvc_function_get_alt(struct usb_function *f, unsigned interface)
+{
+	struct uvc_device *uvc = to_uvc(f);
+
+	INFO(f->config->cdev, "uvc_function_get_alt(%u)\n", interface);
+
+	if (interface == uvc->control_intf)
+		return 0;
+	else if (interface != uvc->streaming_intf)
+		return -EINVAL;
+	else
+		return uvc->state == UVC_STATE_STREAMING ? 1 : 0;
+}
+
+static int
+uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
+{
+	struct uvc_device *uvc = to_uvc(f);
+	struct v4l2_event v4l2_event;
+	struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
+
+	INFO(f->config->cdev, "uvc_function_set_alt(%u, %u)\n", interface, alt);
+
+	if (interface == uvc->control_intf) {
+		if (alt)
+			return -EINVAL;
+
+		if (uvc->state == UVC_STATE_DISCONNECTED) {
+			memset(&v4l2_event, 0, sizeof(v4l2_event));
+			v4l2_event.type = UVC_EVENT_CONNECT;
+			uvc_event->speed = f->config->cdev->gadget->speed;
+			v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+			uvc->state = UVC_STATE_CONNECTED;
+		}
+
+		return 0;
+	}
+
+	if (interface != uvc->streaming_intf)
+		return -EINVAL;
+
+	/* TODO
+	if (usb_endpoint_xfer_bulk(&uvc->desc.vs_ep))
+		return alt ? -EINVAL : 0;
+	*/
+
+	switch (alt) {
+	case 0:
+		if (uvc->state != UVC_STATE_STREAMING)
+			return 0;
+
+		if (uvc->video.ep)
+			usb_ep_disable(uvc->video.ep);
+
+		memset(&v4l2_event, 0, sizeof(v4l2_event));
+		v4l2_event.type = UVC_EVENT_STREAMOFF;
+		v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+		uvc->state = UVC_STATE_CONNECTED;
+		break;
+
+	case 1:
+		if (uvc->state != UVC_STATE_CONNECTED)
+			return 0;
+
+		if (uvc->video.ep) {
+			uvc->video.ep->desc = &uvc_streaming_ep;
+			usb_ep_enable(uvc->video.ep);
+		}
+
+		memset(&v4l2_event, 0, sizeof(v4l2_event));
+		v4l2_event.type = UVC_EVENT_STREAMON;
+		v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+		uvc->state = UVC_STATE_STREAMING;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+uvc_function_disable(struct usb_function *f)
+{
+	struct uvc_device *uvc = to_uvc(f);
+	struct v4l2_event v4l2_event;
+
+	INFO(f->config->cdev, "uvc_function_disable\n");
+
+	memset(&v4l2_event, 0, sizeof(v4l2_event));
+	v4l2_event.type = UVC_EVENT_DISCONNECT;
+	v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+	uvc->state = UVC_STATE_DISCONNECTED;
+}
+
+/* --------------------------------------------------------------------------
+ * Connection / disconnection
+ */
+
+void
+uvc_function_connect(struct uvc_device *uvc)
+{
+	struct usb_composite_dev *cdev = uvc->func.config->cdev;
+	int ret;
+
+	if ((ret = usb_function_activate(&uvc->func)) < 0)
+		INFO(cdev, "UVC connect failed with %d\n", ret);
+}
+
+void
+uvc_function_disconnect(struct uvc_device *uvc)
+{
+	struct usb_composite_dev *cdev = uvc->func.config->cdev;
+	int ret;
+
+	if ((ret = usb_function_deactivate(&uvc->func)) < 0)
+		INFO(cdev, "UVC disconnect failed with %d\n", ret);
+}
+
+/* --------------------------------------------------------------------------
+ * USB probe and disconnect
+ */
+
+static int
+uvc_register_video(struct uvc_device *uvc)
+{
+	struct usb_composite_dev *cdev = uvc->func.config->cdev;
+	struct video_device *video;
+
+	/* TODO reference counting. */
+	video = video_device_alloc();
+	if (video == NULL)
+		return -ENOMEM;
+
+	video->parent = &cdev->gadget->dev;
+	video->fops = &uvc_v4l2_fops;
+	video->release = video_device_release;
+	strncpy(video->name, cdev->gadget->name, sizeof(video->name));
+
+	uvc->vdev = video;
+	video_set_drvdata(video, uvc);
+
+	return video_register_device(video, VFL_TYPE_GRABBER, -1);
+}
+
+#define UVC_COPY_DESCRIPTOR(mem, dst, desc) \
+	do { \
+		memcpy(mem, desc, (desc)->bLength); \
+		*(dst)++ = mem; \
+		mem += (desc)->bLength; \
+	} while (0)
+
+#define UVC_COPY_DESCRIPTORS(mem, dst, src) \
+	do { \
+		const struct usb_descriptor_header * const *__src; \
+		for (__src = src; *__src; ++__src) { \
+			memcpy(mem, *__src, (*__src)->bLength); \
+			*dst++ = mem; \
+			mem += (*__src)->bLength; \
+		} \
+	} while (0)
+
+static struct usb_descriptor_header ** __init
+uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
+{
+	struct uvc_input_header_descriptor *uvc_streaming_header;
+	struct uvc_header_descriptor *uvc_control_header;
+	const struct uvc_descriptor_header * const *uvc_streaming_cls;
+	const struct usb_descriptor_header * const *uvc_streaming_std;
+	const struct usb_descriptor_header * const *src;
+	struct usb_descriptor_header **dst;
+	struct usb_descriptor_header **hdr;
+	unsigned int control_size;
+	unsigned int streaming_size;
+	unsigned int n_desc;
+	unsigned int bytes;
+	void *mem;
+
+	uvc_streaming_cls = (speed == USB_SPEED_FULL)
+			  ? uvc->desc.fs_streaming : uvc->desc.hs_streaming;
+	uvc_streaming_std = (speed == USB_SPEED_FULL)
+			  ? uvc_fs_streaming : uvc_hs_streaming;
+
+	/* Descriptors layout
+	 *
+	 * uvc_iad
+	 * uvc_control_intf
+	 * Class-specific UVC control descriptors
+	 * uvc_control_ep
+	 * uvc_control_cs_ep
+	 * uvc_streaming_intf_alt0
+	 * Class-specific UVC streaming descriptors
+	 * uvc_{fs|hs}_streaming
+	 */
+
+	/* Count descriptors and compute their size. */
+	control_size = 0;
+	streaming_size = 0;
+	bytes = uvc_iad.bLength + uvc_control_intf.bLength
+	      + uvc_control_ep.bLength + uvc_control_cs_ep.bLength
+	      + uvc_streaming_intf_alt0.bLength;
+	n_desc = 5;
+
+	for (src = (const struct usb_descriptor_header **)uvc->desc.control;
+	     *src; ++src) {
+		control_size += (*src)->bLength;
+		bytes += (*src)->bLength;
+		n_desc++;
+	}
+	for (src = (const struct usb_descriptor_header **)uvc_streaming_cls;
+	     *src; ++src) {
+		streaming_size += (*src)->bLength;
+		bytes += (*src)->bLength;
+		n_desc++;
+	}
+	for (src = uvc_streaming_std; *src; ++src) {
+		bytes += (*src)->bLength;
+		n_desc++;
+	}
+
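+	/* Everything lives in a single allocation: an array of n_desc + 1
+	 * descriptor pointers (the extra slot holds the NULL terminator)
+	 * followed by the raw descriptor bytes those pointers refer to. */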
+	mem = kmalloc((n_desc + 1) * sizeof(*src) + bytes, GFP_KERNEL);
+	if (mem == NULL)
+		return NULL;
+
+	hdr = mem;
+	dst = mem;
+	mem += (n_desc + 1) * sizeof(*src);
+
+	/* Copy the descriptors. */
+	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_iad);
+	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_intf);
+
+	uvc_control_header = mem;
+	UVC_COPY_DESCRIPTORS(mem, dst,
+		(const struct usb_descriptor_header**)uvc->desc.control);
+	uvc_control_header->wTotalLength = cpu_to_le16(control_size);
+	uvc_control_header->bInCollection = 1;
+	uvc_control_header->baInterfaceNr[0] = uvc->streaming_intf;
+
+	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_ep);
+	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_cs_ep);
+	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_streaming_intf_alt0);
+
+	uvc_streaming_header = mem;
+	UVC_COPY_DESCRIPTORS(mem, dst,
+		(const struct usb_descriptor_header**)uvc_streaming_cls);
+	uvc_streaming_header->wTotalLength = cpu_to_le16(streaming_size);
+	uvc_streaming_header->bEndpointAddress = uvc_streaming_ep.bEndpointAddress;
+
+	UVC_COPY_DESCRIPTORS(mem, dst, uvc_streaming_std);
+
+	*dst = NULL;
+	return hdr;
+}
+
+static void
+uvc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct uvc_device *uvc = to_uvc(f);
+
+	INFO(cdev, "uvc_function_unbind\n");
+
+	video_unregister_device(uvc->vdev);
+	uvc->control_ep->driver_data = NULL;
+	uvc->video.ep->driver_data = NULL;
+
+	usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
+	kfree(uvc->control_buf);
+
+	kfree(f->descriptors);
+	kfree(f->hs_descriptors);
+
+	kfree(uvc);
+}
+
+static int __init
+uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct uvc_device *uvc = to_uvc(f);
+	struct usb_ep *ep;
+	int ret = -EINVAL;
+
+	INFO(cdev, "uvc_function_bind\n");
+
+	/* Allocate endpoints. */
+	ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
+	if (!ep) {
+		INFO(cdev, "Unable to allocate control EP\n");
+		goto error;
+	}
+	uvc->control_ep = ep;
+	ep->driver_data = uvc;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &uvc_streaming_ep);
+	if (!ep) {
+		INFO(cdev, "Unable to allocate streaming EP\n");
+		goto error;
+	}
+	uvc->video.ep = ep;
+	ep->driver_data = uvc;
+
+	/* Allocate interface IDs. */
+	if ((ret = usb_interface_id(c, f)) < 0)
+		goto error;
+	uvc_iad.bFirstInterface = ret;
+	uvc_control_intf.bInterfaceNumber = ret;
+	uvc->control_intf = ret;
+
+	if ((ret = usb_interface_id(c, f)) < 0)
+		goto error;
+	uvc_streaming_intf_alt0.bInterfaceNumber = ret;
+	uvc_streaming_intf_alt1.bInterfaceNumber = ret;
+	uvc->streaming_intf = ret;
+
+	/* Copy descriptors. */
+	f->descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL);
+	f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH);
+
+	/* Preallocate control endpoint request. */
+	uvc->control_req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
+	uvc->control_buf = kmalloc(UVC_MAX_REQUEST_SIZE, GFP_KERNEL);
+	if (uvc->control_req == NULL || uvc->control_buf == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	uvc->control_req->buf = uvc->control_buf;
+	uvc->control_req->complete = uvc_function_ep0_complete;
+	uvc->control_req->context = uvc;
+
+	/* Avoid letting this gadget enumerate until the userspace server is
+	 * active.
+	 */
+	if ((ret = usb_function_deactivate(f)) < 0)
+		goto error;
+
+	/* Initialise video. */
+	ret = uvc_video_init(&uvc->video);
+	if (ret < 0)
+		goto error;
+
+	/* Register a V4L2 device. */
+	ret = uvc_register_video(uvc);
+	if (ret < 0) {
+		printk(KERN_INFO "Unable to register video device\n");
+		goto error;
+	}
+
+	return 0;
+
+error:
+	if (uvc->vdev)
+		video_device_release(uvc->vdev);
+
+	if (uvc->control_ep)
+		uvc->control_ep->driver_data = NULL;
+	if (uvc->video.ep)
+		uvc->video.ep->driver_data = NULL;
+
+	if (uvc->control_req) {
+		usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
+		kfree(uvc->control_buf);
+	}
+
+	kfree(f->descriptors);
+	kfree(f->hs_descriptors);
+	kfree(f->ss_descriptors);
+	return ret;
+}
+
+/* --------------------------------------------------------------------------
+ * USB gadget function
+ */
+
+/**
+ * uvc_bind_config - add a UVC function to a configuration
+ * @c: the configuration to support the UVC instance
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @uvc_setup(). Caller is also responsible for
+ * calling @uvc_cleanup() before module unload.
+ */
+int __init
+uvc_bind_config(struct usb_configuration *c,
+		const struct uvc_descriptor_header * const *control,
+		const struct uvc_descriptor_header * const *fs_streaming,
+		const struct uvc_descriptor_header * const *hs_streaming)
+{
+	struct uvc_device *uvc;
+	int ret = 0;
+
+	/* TODO Check if the USB device controller supports the required
+	 * features.
+	 */
+	if (!gadget_is_dualspeed(c->cdev->gadget))
+		return -EINVAL;
+
+	uvc = kzalloc(sizeof(*uvc), GFP_KERNEL);
+	if (uvc == NULL)
+		return -ENOMEM;
+
+	uvc->state = UVC_STATE_DISCONNECTED;
+
+	/* Validate the descriptors. */
+	if (control == NULL || control[0] == NULL ||
+	    control[0]->bDescriptorSubType != UVC_VC_HEADER)
+		goto error;
+
+	if (fs_streaming == NULL || fs_streaming[0] == NULL ||
+	    fs_streaming[0]->bDescriptorSubType != UVC_VS_INPUT_HEADER)
+		goto error;
+
+	if (hs_streaming == NULL || hs_streaming[0] == NULL ||
+	    hs_streaming[0]->bDescriptorSubType != UVC_VS_INPUT_HEADER)
+		goto error;
+
+	uvc->desc.control = control;
+	uvc->desc.fs_streaming = fs_streaming;
+	uvc->desc.hs_streaming = hs_streaming;
+
+	/* Allocate string descriptor numbers. */
+	if ((ret = usb_string_id(c->cdev)) < 0)
+		goto error;
+	uvc_en_us_strings[UVC_STRING_ASSOCIATION_IDX].id = ret;
+	uvc_iad.iFunction = ret;
+
+	if ((ret = usb_string_id(c->cdev)) < 0)
+		goto error;
+	uvc_en_us_strings[UVC_STRING_CONTROL_IDX].id = ret;
+	uvc_control_intf.iInterface = ret;
+
+	if ((ret = usb_string_id(c->cdev)) < 0)
+		goto error;
+	uvc_en_us_strings[UVC_STRING_STREAMING_IDX].id = ret;
+	uvc_streaming_intf_alt0.iInterface = ret;
+	uvc_streaming_intf_alt1.iInterface = ret;
+
+	/* Register the function. */
+	uvc->func.name = "uvc";
+	uvc->func.strings = uvc_function_strings;
+	uvc->func.bind = uvc_function_bind;
+	uvc->func.unbind = uvc_function_unbind;
+	uvc->func.get_alt = uvc_function_get_alt;
+	uvc->func.set_alt = uvc_function_set_alt;
+	uvc->func.disable = uvc_function_disable;
+	uvc->func.setup = uvc_function_setup;
+
+	ret = usb_add_function(c, &uvc->func);
+	if (ret)
+		kfree(uvc);
+
+	return ret;
+
+error:
+	kfree(uvc);
+	return ret;
+}
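+
+/*
+ * A minimal usage sketch (illustrative only): the configuration bind callback
+ * and the three descriptor arrays named below are assumed to be provided by
+ * the gadget driver using this function; they are not defined in this file.
+ *
+ *	static int __init webcam_config_bind(struct usb_configuration *c)
+ *	{
+ *		return uvc_bind_config(c, uvc_control_cls,
+ *				uvc_fs_streaming_cls, uvc_hs_streaming_cls);
+ *	}
+ */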
+
+module_param_named(trace, uvc_gadget_trace_param, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(trace, "Trace level bitmask");
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_uvc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_uvc.h
new file mode 100644
index 0000000..abf8329
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_uvc.h
@@ -0,0 +1,25 @@
+/*
+ *	f_uvc.h  --  USB Video Class Gadget driver
+ *
+ *	Copyright (C) 2009-2010
+ *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ */
+
+#ifndef _F_UVC_H_
+#define _F_UVC_H_
+
+#include <linux/usb/composite.h>
+#include <linux/usb/video.h>
+
+extern int uvc_bind_config(struct usb_configuration *c,
+			   const struct uvc_descriptor_header * const *control,
+			   const struct uvc_descriptor_header * const *fs_streaming,
+			   const struct uvc_descriptor_header * const *hs_streaming);
+
+#endif /* _F_UVC_H_ */
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/file_storage.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/file_storage.c
new file mode 100644
index 0000000..a896d73
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/file_storage.c
@@ -0,0 +1,3676 @@
+/*
+ * file_storage.c -- File-backed USB Storage Gadget, for USB development
+ *
+ * Copyright (C) 2003-2008 Alan Stern
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/*
+ * The File-backed Storage Gadget acts as a USB Mass Storage device,
+ * appearing to the host as a disk drive or as a CD-ROM drive.  In addition
+ * to providing an example of a genuinely useful gadget driver for a USB
+ * device, it also illustrates a technique of double-buffering for increased
+ * throughput.  Last but not least, it gives an easy way to probe the
+ * behavior of the Mass Storage drivers in a USB host.
+ *
+ * Backing storage is provided by a regular file or a block device, specified
+ * by the "file" module parameter.  Access can be limited to read-only by
+ * setting the optional "ro" module parameter.  (For CD-ROM emulation,
+ * access is always read-only.)  The gadget will indicate that it has
+ * removable media if the optional "removable" module parameter is set.
+ *
+ * The gadget supports the Control-Bulk (CB), Control-Bulk-Interrupt (CBI),
+ * and Bulk-Only (also known as Bulk-Bulk-Bulk or BBB) transports, selected
+ * by the optional "transport" module parameter.  It also supports the
+ * following protocols: RBC (0x01), ATAPI or SFF-8020i (0x02), QIC-157 (0x03),
+ * UFI (0x04), SFF-8070i (0x05), and transparent SCSI (0x06), selected by
+ * the optional "protocol" module parameter.  In addition, the default
+ * Vendor ID, Product ID, release number and serial number can be overridden.
+ *
+ * There is support for multiple logical units (LUNs), each of which has
+ * its own backing file.  The number of LUNs can be set using the optional
+ * "luns" module parameter (anywhere from 1 to 8), and the corresponding
+ * files are specified using comma-separated lists for "file" and "ro".
+ * The default number of LUNs is taken from the number of "file" elements;
+ * it is 1 if "file" is not given.  If "removable" is not set then a backing
+ * file must be specified for each LUN.  If it is set, then an unspecified
+ * or empty backing filename means the LUN's medium is not loaded.  Ideally
+ * each LUN would be settable independently as a disk drive or a CD-ROM
+ * drive, but currently all LUNs have to be the same type.  The CD-ROM
+ * emulation includes a single data track and no audio tracks; hence there
+ * need be only one backing file per LUN.
+ *
+ * Requirements are modest; only a bulk-in and a bulk-out endpoint are
+ * needed (an interrupt-in endpoint is also needed for CBI).  The memory
+ * requirement amounts to two 16K buffers, size configurable by a parameter.
+ * Support is included for both full-speed and high-speed operation.
+ *
+ * Note that the driver is slightly non-portable in that it assumes a
+ * single memory/DMA buffer will be useable for bulk-in, bulk-out, and
+ * interrupt-in endpoints.  With most device controllers this isn't an
+ * issue, but there may be some with hardware restrictions that prevent
+ * a buffer from being used by more than one endpoint.
+ *
+ * Module options:
+ *
+ *	file=filename[,filename...]
+ *				Required if "removable" is not set, names of
+ *					the files or block devices used for
+ *					backing storage
+ *	serial=HHHH...		Required serial number (string of hex chars)
+ *	ro=b[,b...]		Default false, booleans for read-only access
+ *	removable		Default false, boolean for removable media
+ *	luns=N			Default N = number of filenames, number of
+ *					LUNs to support
+ *	nofua=b[,b...]		Default false, booleans for ignore FUA flag
+ *					in SCSI WRITE(10,12) commands
+ *	stall			Default determined according to the type of
+ *					USB device controller (usually true),
+ *					boolean to permit the driver to halt
+ *					bulk endpoints
+ *	cdrom			Default false, boolean for whether to emulate
+ *					a CD-ROM drive
+ *	transport=XXX		Default BBB, transport name (CB, CBI, or BBB)
+ *	protocol=YYY		Default SCSI, protocol name (RBC, 8020 or
+ *					ATAPI, QIC, UFI, 8070, or SCSI;
+ *					also 1 - 6)
+ *	vendor=0xVVVV		Default 0x0525 (NetChip), USB Vendor ID
+ *	product=0xPPPP		Default 0xa4a5 (FSG), USB Product ID
+ *	release=0xRRRR		Override the USB release number (bcdDevice)
+ *	buflen=N		Default N=16384, buffer size used (will be
+ *					rounded down to a multiple of
+ *					PAGE_CACHE_SIZE)
+ *
+ * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "serial", "ro",
+ * "removable", "luns", "nofua", "stall", and "cdrom" options are available;
+ * default values are used for everything else.
+ *
+ * The pathnames of the backing files and the ro settings are available in
+ * the attribute files "file", "nofua", and "ro" in the lun<n> subdirectory of
+ * the gadget's sysfs directory.  If the "removable" option is set, writing to
+ * these files will simulate ejecting/loading the medium (writing an empty
+ * line means eject) and adjusting a write-enable tab.  Changes to the ro
+ * setting are not allowed when the medium is loaded or if CD-ROM emulation
+ * is being used.
+ *
+ * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
+ * The driver's SCSI command interface was based on the "Information
+ * technology - Small Computer System Interface - 2" document from
+ * X3T9.2 Project 375D, Revision 10L, 7-SEP-93, available at
+ * <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.  The single exception
+ * is opcode 0x23 (READ FORMAT CAPACITIES), which was based on the
+ * "Universal Serial Bus Mass Storage Class UFI Command Specification"
+ * document, Revision 1.0, December 14, 1998, available at
+ * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
+ */
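+
+/*
+ * Example invocation (the backing file path and the serial number below are
+ * purely illustrative):
+ *
+ *	modprobe g_file_storage file=/root/backing.img serial=0123456789AB \
+ *		removable=1 stall=0
+ */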
+
+
+/*
+ *				Driver Design
+ *
+ * The FSG driver is fairly straightforward.  There is a main kernel
+ * thread that handles most of the work.  Interrupt routines field
+ * callbacks from the controller driver: bulk- and interrupt-request
+ * completion notifications, endpoint-0 events, and disconnect events.
+ * Completion events are passed to the main thread by wakeup calls.  Many
+ * ep0 requests are handled at interrupt time, but SetInterface,
+ * SetConfiguration, and device reset requests are forwarded to the
+ * thread in the form of "exceptions" using SIGUSR1 signals (since they
+ * should interrupt any ongoing file I/O operations).
+ *
+ * The thread's main routine implements the standard command/data/status
+ * parts of a SCSI interaction.  It and its subroutines are full of tests
+ * for pending signals/exceptions -- all this polling is necessary since
+ * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
+ * indication that the driver really wants to be running in userspace.)
+ * An important point is that so long as the thread is alive it keeps an
+ * open reference to the backing file.  This will prevent unmounting
+ * the backing file's underlying filesystem and could cause problems
+ * during system shutdown, for example.  To prevent such problems, the
+ * thread catches INT, TERM, and KILL signals and converts them into
+ * an EXIT exception.
+ *
+ * In normal operation the main thread is started during the gadget's
+ * fsg_bind() callback and stopped during fsg_unbind().  But it can also
+ * exit when it receives a signal, and there's no point leaving the
+ * gadget running when the thread is dead.  So just before the thread
+ * exits, it deregisters the gadget driver.  This makes things a little
+ * tricky: The driver is deregistered at two places, and the exiting
+ * thread can indirectly call fsg_unbind() which in turn can tell the
+ * thread to exit.  The first problem is resolved through the use of the
+ * REGISTERED atomic bitflag; the driver will only be deregistered once.
+ * The second problem is resolved by having fsg_unbind() check
+ * fsg->state; it won't try to stop the thread if the state is already
+ * FSG_STATE_TERMINATED.
+ *
+ * To provide maximum throughput, the driver uses a circular pipeline of
+ * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
+ * arbitrarily long; in practice the benefits don't justify having more
+ * than 2 stages (i.e., double buffering).  But it helps to think of the
+ * pipeline as being a long one.  Each buffer head contains a bulk-in and
+ * a bulk-out request pointer (since the buffer can be used for both
+ * output and input -- directions always are given from the host's
+ * point of view) as well as a pointer to the buffer and various state
+ * variables.
+ *
+ * Use of the pipeline follows a simple protocol.  There is a variable
+ * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
+ * At any time that buffer head may still be in use from an earlier
+ * request, so each buffer head has a state variable indicating whether
+ * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
+ * buffer head to be EMPTY, filling the buffer either by file I/O or by
+ * USB I/O (during which the buffer head is BUSY), and marking the buffer
+ * head FULL when the I/O is complete.  Then the buffer will be emptied
+ * (again possibly by USB I/O, during which it is marked BUSY) and
+ * finally marked EMPTY again (possibly by a completion routine).
+ *
+ * A module parameter tells the driver to avoid stalling the bulk
+ * endpoints wherever the transport specification allows.  This is
+ * necessary for some UDCs like the SuperH, which cannot reliably clear a
+ * halt on a bulk endpoint.  However, under certain circumstances the
+ * Bulk-only specification requires a stall.  In such cases the driver
+ * will halt the endpoint and set a flag indicating that it should clear
+ * the halt in software during the next device reset.  Hopefully this
+ * will permit everything to work correctly.  Furthermore, although the
+ * specification allows the bulk-out endpoint to halt when the host sends
+ * too much data, implementing this would cause an unavoidable race.
+ * The driver will always use the "no-stall" approach for OUT transfers.
+ *
+ * One subtle point concerns sending status-stage responses for ep0
+ * requests.  Some of these requests, such as device reset, can involve
+ * interrupting an ongoing file I/O operation, which might take an
+ * arbitrarily long time.  During that delay the host might give up on
+ * the original ep0 request and issue a new one.  When that happens the
+ * driver should not notify the host about completion of the original
+ * request, as the host will no longer be waiting for it.  So the driver
+ * assigns to each ep0 request a unique tag, and it keeps track of the
+ * tag value of the request associated with a long-running exception
+ * (device-reset, interface-change, or configuration-change).  When the
+ * exception handler is finished, the status-stage response is submitted
+ * only if the current ep0 request tag is equal to the exception request
+ * tag.  Thus only the most recently received ep0 request will get a
+ * status-stage response.
+ *
+ * Warning: This driver source file is too long.  It ought to be split up
+ * into a header file plus about 3 separate .c files, to handle the details
+ * of the Gadget, USB Mass Storage, and SCSI protocols.
+ */
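+
+/*
+ * Illustrative (non-compiled) sketch of one pass around the pipeline
+ * described above, as seen from the main thread's read path; the real code
+ * is in do_read() and the bulk completion handlers further down:
+ *
+ *	bh = fsg->next_buffhd_to_fill;
+ *	while (bh->state != BUF_STATE_EMPTY)
+ *		sleep_thread(fsg);		// USB side still owns it
+ *	nread = vfs_read(curlun->filp, bh->buf, amount, &file_offset_tmp);
+ *	bh->state = BUF_STATE_FULL;		// ready for the bulk-in side
+ */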
+
+
+/* #define VERBOSE_DEBUG */
+/* #define DUMP_MSGS */
+
+
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/dcache.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/freezer.h>
+#include <linux/utsname.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "gadget_chips.h"
+
+
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+/*-------------------------------------------------------------------------*/
+
+#define DRIVER_DESC		"File-backed Storage Gadget"
+#define DRIVER_NAME		"g_file_storage"
+#define DRIVER_VERSION		"1 September 2010"
+
+static       char fsg_string_manufacturer[64];
+static const char fsg_string_product[] = DRIVER_DESC;
+static const char fsg_string_config[] = "Self-powered";
+static const char fsg_string_interface[] = "Mass Storage";
+
+
+#include "storage_common.c"
+
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Alan Stern");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/*
+ * This driver assumes self-powered hardware and has no way for users to
+ * trigger remote wakeup.  It uses autoconfiguration to select endpoints
+ * and endpoint addresses.
+ */
+
+
+/*-------------------------------------------------------------------------*/
+
+
+/* Encapsulate the module parameter settings */
+
+static struct {
+	char		*file[FSG_MAX_LUNS];
+	char		*serial;
+	bool		ro[FSG_MAX_LUNS];
+	bool		nofua[FSG_MAX_LUNS];
+	unsigned int	num_filenames;
+	unsigned int	num_ros;
+	unsigned int	num_nofuas;
+	unsigned int	nluns;
+
+	bool		removable;
+	bool		can_stall;
+	bool		cdrom;
+
+	char		*transport_parm;
+	char		*protocol_parm;
+	unsigned short	vendor;
+	unsigned short	product;
+	unsigned short	release;
+	unsigned int	buflen;
+
+	int		transport_type;
+	char		*transport_name;
+	int		protocol_type;
+	char		*protocol_name;
+
+} mod_data = {					// Default values
+	.transport_parm		= "BBB",
+	.protocol_parm		= "SCSI",
+	.removable		= 0,
+	.can_stall		= 1,
+	.cdrom			= 0,
+	.vendor			= FSG_VENDOR_ID,
+	.product		= FSG_PRODUCT_ID,
+	.release		= 0xffff,	// Use controller chip type
+	.buflen			= 16384,
+	};
+
+
+module_param_array_named(file, mod_data.file, charp, &mod_data.num_filenames,
+		S_IRUGO);
+MODULE_PARM_DESC(file, "names of backing files or devices");
+
+module_param_named(serial, mod_data.serial, charp, S_IRUGO);
+MODULE_PARM_DESC(serial, "USB serial number");
+
+module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO);
+MODULE_PARM_DESC(ro, "true to force read-only");
+
+module_param_array_named(nofua, mod_data.nofua, bool, &mod_data.num_nofuas,
+		S_IRUGO);
+MODULE_PARM_DESC(nofua, "true to ignore SCSI WRITE(10,12) FUA bit");
+
+module_param_named(luns, mod_data.nluns, uint, S_IRUGO);
+MODULE_PARM_DESC(luns, "number of LUNs");
+
+module_param_named(removable, mod_data.removable, bool, S_IRUGO);
+MODULE_PARM_DESC(removable, "true to simulate removable media");
+
+module_param_named(stall, mod_data.can_stall, bool, S_IRUGO);
+MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
+
+module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO);
+MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk");
+
+/* In the non-TEST version, only the module parameters listed above
+ * are available. */
+#ifdef CONFIG_USB_FILE_STORAGE_TEST
+
+module_param_named(transport, mod_data.transport_parm, charp, S_IRUGO);
+MODULE_PARM_DESC(transport, "type of transport (BBB, CBI, or CB)");
+
+module_param_named(protocol, mod_data.protocol_parm, charp, S_IRUGO);
+MODULE_PARM_DESC(protocol, "type of protocol (RBC, 8020, QIC, UFI, "
+		"8070, or SCSI)");
+
+module_param_named(vendor, mod_data.vendor, ushort, S_IRUGO);
+MODULE_PARM_DESC(vendor, "USB Vendor ID");
+
+module_param_named(product, mod_data.product, ushort, S_IRUGO);
+MODULE_PARM_DESC(product, "USB Product ID");
+
+module_param_named(release, mod_data.release, ushort, S_IRUGO);
+MODULE_PARM_DESC(release, "USB release number");
+
+module_param_named(buflen, mod_data.buflen, uint, S_IRUGO);
+MODULE_PARM_DESC(buflen, "I/O buffer size");
+
+#endif /* CONFIG_USB_FILE_STORAGE_TEST */
+
+
+/*
+ * These definitions will permit the compiler to avoid generating code for
+ * parts of the driver that aren't used in the non-TEST version.  Even gcc
+ * can recognize when a test of a constant expression yields a dead code
+ * path.
+ */
+
+#ifdef CONFIG_USB_FILE_STORAGE_TEST
+
+#define transport_is_bbb()	(mod_data.transport_type == USB_PR_BULK)
+#define transport_is_cbi()	(mod_data.transport_type == USB_PR_CBI)
+#define protocol_is_scsi()	(mod_data.protocol_type == USB_SC_SCSI)
+
+#else
+
+#define transport_is_bbb()	1
+#define transport_is_cbi()	0
+#define protocol_is_scsi()	1
+
+#endif /* CONFIG_USB_FILE_STORAGE_TEST */
+
+
+/*-------------------------------------------------------------------------*/
+
+
+struct fsg_dev {
+	/* lock protects: state, all the req_busy's, and cbbuf_cmnd */
+	spinlock_t		lock;
+	struct usb_gadget	*gadget;
+
+	/* filesem protects: backing files in use */
+	struct rw_semaphore	filesem;
+
+	/* reference counting: wait until all LUNs are released */
+	struct kref		ref;
+
+	struct usb_ep		*ep0;		// Handy copy of gadget->ep0
+	struct usb_request	*ep0req;	// For control responses
+	unsigned int		ep0_req_tag;
+	const char		*ep0req_name;
+
+	struct usb_request	*intreq;	// For interrupt responses
+	int			intreq_busy;
+	struct fsg_buffhd	*intr_buffhd;
+
+	unsigned int		bulk_out_maxpacket;
+	enum fsg_state		state;		// For exception handling
+	unsigned int		exception_req_tag;
+
+	u8			config, new_config;
+
+	unsigned int		running : 1;
+	unsigned int		bulk_in_enabled : 1;
+	unsigned int		bulk_out_enabled : 1;
+	unsigned int		intr_in_enabled : 1;
+	unsigned int		phase_error : 1;
+	unsigned int		short_packet_received : 1;
+	unsigned int		bad_lun_okay : 1;
+
+	unsigned long		atomic_bitflags;
+#define REGISTERED		0
+#define IGNORE_BULK_OUT		1
+#define SUSPENDED		2
+
+	struct usb_ep		*bulk_in;
+	struct usb_ep		*bulk_out;
+	struct usb_ep		*intr_in;
+
+	struct fsg_buffhd	*next_buffhd_to_fill;
+	struct fsg_buffhd	*next_buffhd_to_drain;
+
+	int			thread_wakeup_needed;
+	struct completion	thread_notifier;
+	struct task_struct	*thread_task;
+
+	int			cmnd_size;
+	u8			cmnd[MAX_COMMAND_SIZE];
+	enum data_direction	data_dir;
+	u32			data_size;
+	u32			data_size_from_cmnd;
+	u32			tag;
+	unsigned int		lun;
+	u32			residue;
+	u32			usb_amount_left;
+
+	/* The CB protocol offers no way for a host to know when a command
+	 * has completed.  As a result the next command may arrive early,
+	 * and we will still have to handle it.  For that reason we need
+	 * a buffer to store new commands when using CB (or CBI, which
+	 * does not oblige a host to wait for command completion either). */
+	int			cbbuf_cmnd_size;
+	u8			cbbuf_cmnd[MAX_COMMAND_SIZE];
+
+	unsigned int		nluns;
+	struct fsg_lun		*luns;
+	struct fsg_lun		*curlun;
+	/* Must be the last entry */
+	struct fsg_buffhd	buffhds[];
+};
+
+typedef void (*fsg_routine_t)(struct fsg_dev *);
+
+static int exception_in_progress(struct fsg_dev *fsg)
+{
+	return (fsg->state > FSG_STATE_IDLE);
+}
+
+/* Make bulk-out requests be divisible by the maxpacket size */
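+/* For example, with a 512-byte bulk-out maxpacket a 13000-byte transfer is
+ * padded out to 13312 bytes (26 full packets); bulk_out_intended_length still
+ * records the 13000 bytes the host is actually expected to send. */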
+static void set_bulk_out_req_length(struct fsg_dev *fsg,
+		struct fsg_buffhd *bh, unsigned int length)
+{
+	unsigned int	rem;
+
+	bh->bulk_out_intended_length = length;
+	rem = length % fsg->bulk_out_maxpacket;
+	if (rem > 0)
+		length += fsg->bulk_out_maxpacket - rem;
+	bh->outreq->length = length;
+}
+
+static struct fsg_dev			*the_fsg;
+static struct usb_gadget_driver		fsg_driver;
+
+
+/*-------------------------------------------------------------------------*/
+
+static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
+{
+	const char	*name;
+
+	if (ep == fsg->bulk_in)
+		name = "bulk-in";
+	else if (ep == fsg->bulk_out)
+		name = "bulk-out";
+	else
+		name = ep->name;
+	DBG(fsg, "%s set halt\n", name);
+	return usb_ep_set_halt(ep);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * DESCRIPTORS ... most are static, but strings and (full) configuration
+ * descriptors are built on demand.  Also the (static) config and interface
+ * descriptors are adjusted during fsg_bind().
+ */
+
+/* There is only one configuration. */
+#define	CONFIG_VALUE		1
+
+static struct usb_device_descriptor
+device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		cpu_to_le16(0x0200),
+	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
+
+	/* The next three values can be overridden by module parameters */
+	.idVendor =		cpu_to_le16(FSG_VENDOR_ID),
+	.idProduct =		cpu_to_le16(FSG_PRODUCT_ID),
+	.bcdDevice =		cpu_to_le16(0xffff),
+
+	.iManufacturer =	FSG_STRING_MANUFACTURER,
+	.iProduct =		FSG_STRING_PRODUCT,
+	.iSerialNumber =	FSG_STRING_SERIAL,
+	.bNumConfigurations =	1,
+};
+
+static struct usb_config_descriptor
+config_desc = {
+	.bLength =		sizeof config_desc,
+	.bDescriptorType =	USB_DT_CONFIG,
+
+	/* wTotalLength computed by usb_gadget_config_buf() */
+	.bNumInterfaces =	1,
+	.bConfigurationValue =	CONFIG_VALUE,
+	.iConfiguration =	FSG_STRING_CONFIG,
+	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.bMaxPower =		CONFIG_USB_GADGET_VBUS_DRAW / 2,
+};
+
+
+static struct usb_qualifier_descriptor
+dev_qualifier = {
+	.bLength =		sizeof dev_qualifier,
+	.bDescriptorType =	USB_DT_DEVICE_QUALIFIER,
+
+	.bcdUSB =		cpu_to_le16(0x0200),
+	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
+
+	.bNumConfigurations =	1,
+};
+
+static int populate_bos(struct fsg_dev *fsg, u8 *buf)
+{
+	memcpy(buf, &fsg_bos_desc, USB_DT_BOS_SIZE);
+	buf += USB_DT_BOS_SIZE;
+
+	memcpy(buf, &fsg_ext_cap_desc, USB_DT_USB_EXT_CAP_SIZE);
+	buf += USB_DT_USB_EXT_CAP_SIZE;
+
+	memcpy(buf, &fsg_ss_cap_desc, USB_DT_USB_SS_CAP_SIZE);
+
+	return USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE
+		+ USB_DT_USB_EXT_CAP_SIZE;
+}
+
+/*
+ * Config descriptors must agree with the code that sets configurations
+ * and with code managing interfaces and their altsettings.  They must
+ * also handle different speeds and other-speed requests.
+ */
+static int populate_config_buf(struct usb_gadget *gadget,
+		u8 *buf, u8 type, unsigned index)
+{
+	enum usb_device_speed			speed = gadget->speed;
+	int					len;
+	const struct usb_descriptor_header	**function;
+
+	if (index > 0)
+		return -EINVAL;
+
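+	/* For an "other speed" request, report the configuration we would use
+	 * at the speed we are not currently running at: the sum-and-subtract
+	 * below simply flips USB_SPEED_FULL <-> USB_SPEED_HIGH. */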
+	if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG)
+		speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
+	function = gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH
+		? (const struct usb_descriptor_header **)fsg_hs_function
+		: (const struct usb_descriptor_header **)fsg_fs_function;
+
+	/* for now, don't advertise srp-only devices */
+	if (!gadget_is_otg(gadget))
+		function++;
+
+	len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
+	((struct usb_config_descriptor *) buf)->bDescriptorType = type;
+	return len;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* These routines may be called in process context or in_irq */
+
+/* Caller must hold fsg->lock */
+static void wakeup_thread(struct fsg_dev *fsg)
+{
+	/* Tell the main thread that something has happened */
+	fsg->thread_wakeup_needed = 1;
+	if (fsg->thread_task)
+		wake_up_process(fsg->thread_task);
+}
+
+
+static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
+{
+	unsigned long		flags;
+
+	/* Do nothing if a higher-priority exception is already in progress.
+	 * If a lower-or-equal priority exception is in progress, preempt it
+	 * and notify the main thread by sending it a signal. */
+	spin_lock_irqsave(&fsg->lock, flags);
+	if (fsg->state <= new_state) {
+		fsg->exception_req_tag = fsg->ep0_req_tag;
+		fsg->state = new_state;
+		if (fsg->thread_task)
+			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
+					fsg->thread_task);
+	}
+	spin_unlock_irqrestore(&fsg->lock, flags);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* The disconnect callback and ep0 routines.  These always run in_irq,
+ * except that ep0_queue() is called in the main thread to acknowledge
+ * completion of various requests: set config, set interface, and
+ * Bulk-only device reset. */
+
+static void fsg_disconnect(struct usb_gadget *gadget)
+{
+	struct fsg_dev		*fsg = get_gadget_data(gadget);
+
+	DBG(fsg, "disconnect or port reset\n");
+	raise_exception(fsg, FSG_STATE_DISCONNECT);
+}
+
+
+static int ep0_queue(struct fsg_dev *fsg)
+{
+	int	rc;
+
+	rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
+	if (rc != 0 && rc != -ESHUTDOWN) {
+
+		/* We can't do much more than wait for a reset */
+		WARNING(fsg, "error in submission: %s --> %d\n",
+				fsg->ep0->name, rc);
+	}
+	return rc;
+}
+
+static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_dev		*fsg = ep->driver_data;
+
+	if (req->actual > 0)
+		dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
+	if (req->status || req->actual != req->length)
+		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
+				req->status, req->actual, req->length);
+	if (req->status == -ECONNRESET)		// Request was cancelled
+		usb_ep_fifo_flush(ep);
+
+	if (req->status == 0 && req->context)
+		((fsg_routine_t) (req->context))(fsg);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Bulk and interrupt endpoint completion handlers.
+ * These always run in_irq. */
+
+static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_dev		*fsg = ep->driver_data;
+	struct fsg_buffhd	*bh = req->context;
+
+	if (req->status || req->actual != req->length)
+		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
+				req->status, req->actual, req->length);
+	if (req->status == -ECONNRESET)		// Request was cancelled
+		usb_ep_fifo_flush(ep);
+
+	/* Hold the lock while we update the request and buffer states */
+	smp_wmb();
+	spin_lock(&fsg->lock);
+	bh->inreq_busy = 0;
+	bh->state = BUF_STATE_EMPTY;
+	wakeup_thread(fsg);
+	spin_unlock(&fsg->lock);
+}
+
+static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_dev		*fsg = ep->driver_data;
+	struct fsg_buffhd	*bh = req->context;
+
+	dump_msg(fsg, "bulk-out", req->buf, req->actual);
+	if (req->status || req->actual != bh->bulk_out_intended_length)
+		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
+				req->status, req->actual,
+				bh->bulk_out_intended_length);
+	if (req->status == -ECONNRESET)		// Request was cancelled
+		usb_ep_fifo_flush(ep);
+
+	/* Hold the lock while we update the request and buffer states */
+	smp_wmb();
+	spin_lock(&fsg->lock);
+	bh->outreq_busy = 0;
+	bh->state = BUF_STATE_FULL;
+	wakeup_thread(fsg);
+	spin_unlock(&fsg->lock);
+}
+
+
+#ifdef CONFIG_USB_FILE_STORAGE_TEST
+static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_dev		*fsg = ep->driver_data;
+	struct fsg_buffhd	*bh = req->context;
+
+	if (req->status || req->actual != req->length)
+		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
+				req->status, req->actual, req->length);
+	if (req->status == -ECONNRESET)		// Request was cancelled
+		usb_ep_fifo_flush(ep);
+
+	/* Hold the lock while we update the request and buffer states */
+	smp_wmb();
+	spin_lock(&fsg->lock);
+	fsg->intreq_busy = 0;
+	bh->state = BUF_STATE_EMPTY;
+	wakeup_thread(fsg);
+	spin_unlock(&fsg->lock);
+}
+
+#else
+static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
+{}
+#endif /* CONFIG_USB_FILE_STORAGE_TEST */
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Ep0 class-specific handlers.  These always run in_irq. */
+
+#ifdef CONFIG_USB_FILE_STORAGE_TEST
+static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct usb_request	*req = fsg->ep0req;
+	static u8		cbi_reset_cmnd[6] = {
+			SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff};
+
+	/* Error in command transfer? */
+	if (req->status || req->length != req->actual ||
+			req->actual < 6 || req->actual > MAX_COMMAND_SIZE) {
+
+		/* Not all controllers allow a protocol stall after
+		 * receiving control-out data, but we'll try anyway. */
+		fsg_set_halt(fsg, fsg->ep0);
+		return;			// Wait for reset
+	}
+
+	/* Is it the special reset command? */
+	if (req->actual >= sizeof cbi_reset_cmnd &&
+			memcmp(req->buf, cbi_reset_cmnd,
+				sizeof cbi_reset_cmnd) == 0) {
+
+		/* Raise an exception to stop the current operation
+		 * and reinitialize our state. */
+		DBG(fsg, "cbi reset request\n");
+		raise_exception(fsg, FSG_STATE_RESET);
+		return;
+	}
+
+	VDBG(fsg, "CB[I] accept device-specific command\n");
+	spin_lock(&fsg->lock);
+
+	/* Save the command for later */
+	if (fsg->cbbuf_cmnd_size)
+		WARNING(fsg, "CB[I] overwriting previous command\n");
+	fsg->cbbuf_cmnd_size = req->actual;
+	memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size);
+
+	wakeup_thread(fsg);
+	spin_unlock(&fsg->lock);
+}
+
+#else
+static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{}
+#endif /* CONFIG_USB_FILE_STORAGE_TEST */
+
+
+static int class_setup_req(struct fsg_dev *fsg,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_request	*req = fsg->ep0req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	if (!fsg->config)
+		return value;
+
+	/* Handle Bulk-only class-specific requests */
+	if (transport_is_bbb()) {
+		switch (ctrl->bRequest) {
+
+		case US_BULK_RESET_REQUEST:
+			if (ctrl->bRequestType != (USB_DIR_OUT |
+					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+				break;
+			if (w_index != 0 || w_value != 0 || w_length != 0) {
+				value = -EDOM;
+				break;
+			}
+
+			/* Raise an exception to stop the current operation
+			 * and reinitialize our state. */
+			DBG(fsg, "bulk reset request\n");
+			raise_exception(fsg, FSG_STATE_RESET);
+			value = DELAYED_STATUS;
+			break;
+
+		case US_BULK_GET_MAX_LUN:
+			if (ctrl->bRequestType != (USB_DIR_IN |
+					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+				break;
+			if (w_index != 0 || w_value != 0 || w_length != 1) {
+				value = -EDOM;
+				break;
+			}
+			VDBG(fsg, "get max LUN\n");
+			*(u8 *) req->buf = fsg->nluns - 1;
+			value = 1;
+			break;
+		}
+	}
+
+	/* Handle CBI class-specific requests */
+	else {
+		switch (ctrl->bRequest) {
+
+		case USB_CBI_ADSC_REQUEST:
+			if (ctrl->bRequestType != (USB_DIR_OUT |
+					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+				break;
+			if (w_index != 0 || w_value != 0) {
+				value = -EDOM;
+				break;
+			}
+			if (w_length > MAX_COMMAND_SIZE) {
+				value = -EOVERFLOW;
+				break;
+			}
+			value = w_length;
+			fsg->ep0req->context = received_cbi_adsc;
+			break;
+		}
+	}
+
+	if (value == -EOPNOTSUPP)
+		VDBG(fsg,
+			"unknown class-specific control req "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			le16_to_cpu(ctrl->wValue), w_index, w_length);
+	return value;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Ep0 standard request handlers.  These always run in_irq. */
+
+static int standard_setup_req(struct fsg_dev *fsg,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_request	*req = fsg->ep0req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+
+	/* Usually this just stores reply data in the pre-allocated ep0 buffer,
+	 * but config change events will also reconfigure hardware. */
+	switch (ctrl->bRequest) {
+
+	case USB_REQ_GET_DESCRIPTOR:
+		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
+				USB_RECIP_DEVICE))
+			break;
+		switch (w_value >> 8) {
+
+		case USB_DT_DEVICE:
+			VDBG(fsg, "get device descriptor\n");
+			device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
+			value = sizeof device_desc;
+			memcpy(req->buf, &device_desc, value);
+			break;
+		case USB_DT_DEVICE_QUALIFIER:
+			VDBG(fsg, "get device qualifier\n");
+			if (!gadget_is_dualspeed(fsg->gadget) ||
+					fsg->gadget->speed == USB_SPEED_SUPER)
+				break;
+			/*
+			 * Assume ep0 uses the same maxpacket value for both
+			 * speeds
+			 */
+			dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
+			value = sizeof dev_qualifier;
+			memcpy(req->buf, &dev_qualifier, value);
+			break;
+
+		case USB_DT_OTHER_SPEED_CONFIG:
+			VDBG(fsg, "get other-speed config descriptor\n");
+			if (!gadget_is_dualspeed(fsg->gadget) ||
+					fsg->gadget->speed == USB_SPEED_SUPER)
+				break;
+			goto get_config;
+		case USB_DT_CONFIG:
+			VDBG(fsg, "get configuration descriptor\n");
+get_config:
+			value = populate_config_buf(fsg->gadget,
+					req->buf,
+					w_value >> 8,
+					w_value & 0xff);
+			break;
+
+		case USB_DT_STRING:
+			VDBG(fsg, "get string descriptor\n");
+
+			/* wIndex == language code */
+			value = usb_gadget_get_string(&fsg_stringtab,
+					w_value & 0xff, req->buf);
+			break;
+
+		case USB_DT_BOS:
+			VDBG(fsg, "get bos descriptor\n");
+
+			if (gadget_is_superspeed(fsg->gadget))
+				value = populate_bos(fsg, req->buf);
+			break;
+		}
+
+		break;
+
+	/* One config, two speeds */
+	case USB_REQ_SET_CONFIGURATION:
+		if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
+				USB_RECIP_DEVICE))
+			break;
+		VDBG(fsg, "set configuration\n");
+		if (w_value == CONFIG_VALUE || w_value == 0) {
+			fsg->new_config = w_value;
+
+			/* Raise an exception to wipe out previous transaction
+			 * state (queued bufs, etc) and set the new config. */
+			raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
+			value = DELAYED_STATUS;
+		}
+		break;
+	case USB_REQ_GET_CONFIGURATION:
+		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
+				USB_RECIP_DEVICE))
+			break;
+		VDBG(fsg, "get configuration\n");
+		*(u8 *) req->buf = fsg->config;
+		value = 1;
+		break;
+
+	case USB_REQ_SET_INTERFACE:
+		if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
+				USB_RECIP_INTERFACE))
+			break;
+		if (fsg->config && w_index == 0) {
+
+			/* Raise an exception to wipe out previous transaction
+			 * state (queued bufs, etc) and install the new
+			 * interface altsetting. */
+			raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE);
+			value = DELAYED_STATUS;
+		}
+		break;
+	case USB_REQ_GET_INTERFACE:
+		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
+				USB_RECIP_INTERFACE))
+			break;
+		if (!fsg->config)
+			break;
+		if (w_index != 0) {
+			value = -EDOM;
+			break;
+		}
+		VDBG(fsg, "get interface\n");
+		*(u8 *) req->buf = 0;
+		value = 1;
+		break;
+
+	default:
+		VDBG(fsg,
+			"unknown control req %02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, le16_to_cpu(ctrl->wLength));
+	}
+
+	return value;
+}
+
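+/* For reference: in a GET_DESCRIPTOR request the descriptor type travels in
+ * the high byte of wValue and the descriptor index in the low byte, which is
+ * why the code above switches on (w_value >> 8) and passes (w_value & 0xff)
+ * down as the index.  For string descriptors, wIndex carries the language
+ * ID. */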
+
+static int fsg_setup(struct usb_gadget *gadget,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct fsg_dev		*fsg = get_gadget_data(gadget);
+	int			rc;
+	int			w_length = le16_to_cpu(ctrl->wLength);
+
+	++fsg->ep0_req_tag;		// Record arrival of a new request
+	fsg->ep0req->context = NULL;
+	fsg->ep0req->length = 0;
+	dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
+
+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
+		rc = class_setup_req(fsg, ctrl);
+	else
+		rc = standard_setup_req(fsg, ctrl);
+
+	/* Respond with data/status or defer until later? */
+	if (rc >= 0 && rc != DELAYED_STATUS) {
+		rc = min(rc, w_length);
+		fsg->ep0req->length = rc;
+		fsg->ep0req->zero = rc < w_length;
+		fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ?
+				"ep0-in" : "ep0-out");
+		rc = ep0_queue(fsg);
+	}
+
+	/* Device either stalls (rc < 0) or reports success */
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* All the following routines run in process context */
+
+
+/* Use this for bulk or interrupt transfers, not ep0 */
+static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
+		struct usb_request *req, int *pbusy,
+		enum fsg_buffer_state *state)
+{
+	int	rc;
+
+	if (ep == fsg->bulk_in)
+		dump_msg(fsg, "bulk-in", req->buf, req->length);
+	else if (ep == fsg->intr_in)
+		dump_msg(fsg, "intr-in", req->buf, req->length);
+
+	spin_lock_irq(&fsg->lock);
+	*pbusy = 1;
+	*state = BUF_STATE_BUSY;
+	spin_unlock_irq(&fsg->lock);
+	rc = usb_ep_queue(ep, req, GFP_KERNEL);
+	if (rc != 0) {
+		*pbusy = 0;
+		*state = BUF_STATE_EMPTY;
+
+		/* We can't do much more than wait for a reset */
+
+		/* Note: currently the net2280 driver fails zero-length
+		 * submissions if DMA is enabled. */
+		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
+						req->length == 0))
+			WARNING(fsg, "error in submission: %s --> %d\n",
+					ep->name, rc);
+	}
+}
+
+
+static int sleep_thread(struct fsg_dev *fsg)
+{
+	int	rc = 0;
+
+	/* Wait until a signal arrives or we are woken up */
+	for (;;) {
+		try_to_freeze();
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current)) {
+			rc = -EINTR;
+			break;
+		}
+		if (fsg->thread_wakeup_needed)
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+	fsg->thread_wakeup_needed = 0;
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_read(struct fsg_dev *fsg)
+{
+	struct fsg_lun		*curlun = fsg->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			rc;
+	u32			amount_left;
+	loff_t			file_offset, file_offset_tmp;
+	unsigned int		amount;
+	ssize_t			nread;
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	if (fsg->cmnd[0] == READ_6)
+		lba = get_unaligned_be24(&fsg->cmnd[1]);
+	else {
+		lba = get_unaligned_be32(&fsg->cmnd[2]);
+
+		/* We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = don't read from the
+		 * cache), but we don't implement them. */
+		if ((fsg->cmnd[1] & ~0x18) != 0) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+	file_offset = ((loff_t) lba) << curlun->blkbits;
+
+	/* Carry out the file reads */
+	amount_left = fsg->data_size_from_cmnd;
+	if (unlikely(amount_left == 0))
+		return -EIO;		// No default reply
+
+	for (;;) {
+
+		/* Figure out how much we need to read:
+		 * Try to read the remaining amount.
+		 * But don't read more than the buffer size.
+		 * And don't try to read past the end of the file.
+		 */
+		amount = min((unsigned int) amount_left, mod_data.buflen);
+		amount = min((loff_t) amount,
+				curlun->file_length - file_offset);
+
+		/* Wait for the next buffer to become available */
+		bh = fsg->next_buffhd_to_fill;
+		while (bh->state != BUF_STATE_EMPTY) {
+			rc = sleep_thread(fsg);
+			if (rc)
+				return rc;
+		}
+
+		/* If we were asked to read past the end of file,
+		 * end with an empty buffer. */
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info = file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			bh->inreq->length = 0;
+			bh->state = BUF_STATE_FULL;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				(char __user *) bh->buf,
+				amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+				(unsigned long long) file_offset,
+				(int) nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file read: %d\n",
+					(int) nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file read: %d/%u\n",
+					(int) nread, amount);
+			nread = round_down(nread, curlun->blksize);
+		}
+		file_offset  += nread;
+		amount_left  -= nread;
+		fsg->residue -= nread;
+
+		/* Except at the end of the transfer, nread will be
+		 * equal to the buffer size, which is divisible by the
+		 * bulk-in maxpacket size.
+		 */
+		bh->inreq->length = nread;
+		bh->state = BUF_STATE_FULL;
+
+		/* If an error occurred, report it and its position */
+		if (nread < amount) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info = file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			break;
+		}
+
+		if (amount_left == 0)
+			break;		// No more left to read
+
+		/* Send this buffer and go read some more */
+		bh->inreq->zero = 0;
+		start_transfer(fsg, fsg->bulk_in, bh->inreq,
+				&bh->inreq_busy, &bh->state);
+		fsg->next_buffhd_to_fill = bh->next;
+	}
+
+	return -EIO;		// No default reply
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_write(struct fsg_dev *fsg)
+{
+	struct fsg_lun		*curlun = fsg->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			get_some_more;
+	u32			amount_left_to_req, amount_left_to_write;
+	loff_t			usb_offset, file_offset, file_offset_tmp;
+	unsigned int		amount;
+	ssize_t			nwritten;
+	int			rc;
+
+	if (curlun->ro) {
+		curlun->sense_data = SS_WRITE_PROTECTED;
+		return -EINVAL;
+	}
+	spin_lock(&curlun->filp->f_lock);
+	curlun->filp->f_flags &= ~O_SYNC;	// Default is not to wait
+	spin_unlock(&curlun->filp->f_lock);
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	if (fsg->cmnd[0] == WRITE_6)
+		lba = get_unaligned_be24(&fsg->cmnd[1]);
+	else {
+		lba = get_unaligned_be32(&fsg->cmnd[2]);
+
+		/* We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = write directly to the
+		 * medium).  We don't implement DPO; we implement FUA by
+		 * performing synchronous output. */
+		if ((fsg->cmnd[1] & ~0x18) != 0) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+		/* FUA */
+		if (!curlun->nofua && (fsg->cmnd[1] & 0x08)) {
+			spin_lock(&curlun->filp->f_lock);
+			curlun->filp->f_flags |= O_DSYNC;
+			spin_unlock(&curlun->filp->f_lock);
+		}
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/* Carry out the file writes */
+	get_some_more = 1;
+	file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
+	amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
+
+	while (amount_left_to_write > 0) {
+
+		/* Queue a request for more data from the host */
+		bh = fsg->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY && get_some_more) {
+
+			/* Figure out how much we want to get:
+			 * Try to get the remaining amount,
+			 * but not more than the buffer size.
+			 */
+			amount = min(amount_left_to_req, mod_data.buflen);
+
+			/* Beyond the end of the backing file? */
+			if (usb_offset >= curlun->file_length) {
+				get_some_more = 0;
+				curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+				curlun->sense_data_info = usb_offset >> curlun->blkbits;
+				curlun->info_valid = 1;
+				continue;
+			}
+
+			/* Get the next buffer */
+			usb_offset += amount;
+			fsg->usb_amount_left -= amount;
+			amount_left_to_req -= amount;
+			if (amount_left_to_req == 0)
+				get_some_more = 0;
+
+			/* Except at the end of the transfer, amount will be
+			 * equal to the buffer size, which is divisible by
+			 * the bulk-out maxpacket size.
+			 */
+			set_bulk_out_req_length(fsg, bh, amount);
+			start_transfer(fsg, fsg->bulk_out, bh->outreq,
+					&bh->outreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+			continue;
+		}
+
+		/* Write the received data to the backing file */
+		bh = fsg->next_buffhd_to_drain;
+		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
+			break;			// We stopped early
+		if (bh->state == BUF_STATE_FULL) {
+			smp_rmb();
+			fsg->next_buffhd_to_drain = bh->next;
+			bh->state = BUF_STATE_EMPTY;
+
+			/* Did something go wrong with the transfer? */
+			if (bh->outreq->status != 0) {
+				curlun->sense_data = SS_COMMUNICATION_FAILURE;
+				curlun->sense_data_info = file_offset >> curlun->blkbits;
+				curlun->info_valid = 1;
+				break;
+			}
+
+			amount = bh->outreq->actual;
+			if (curlun->file_length - file_offset < amount) {
+				LERROR(curlun,
+	"write %u @ %llu beyond end %llu\n",
+	amount, (unsigned long long) file_offset,
+	(unsigned long long) curlun->file_length);
+				amount = curlun->file_length - file_offset;
+			}
+
+			/* Don't accept excess data.  The spec doesn't say
+			 * what to do in this case.  We'll ignore the error.
+			 */
+			amount = min(amount, bh->bulk_out_intended_length);
+
+			/* Don't write a partial block */
+			amount = round_down(amount, curlun->blksize);
+			if (amount == 0)
+				goto empty_write;
+
+			/* Perform the write */
+			file_offset_tmp = file_offset;
+			nwritten = vfs_write(curlun->filp,
+					(char __user *) bh->buf,
+					amount, &file_offset_tmp);
+			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
+					(unsigned long long) file_offset,
+					(int) nwritten);
+			if (signal_pending(current))
+				return -EINTR;		// Interrupted!
+
+			if (nwritten < 0) {
+				LDBG(curlun, "error in file write: %d\n",
+						(int) nwritten);
+				nwritten = 0;
+			} else if (nwritten < amount) {
+				LDBG(curlun, "partial file write: %d/%u\n",
+						(int) nwritten, amount);
+				nwritten = round_down(nwritten, curlun->blksize);
+			}
+			file_offset += nwritten;
+			amount_left_to_write -= nwritten;
+			fsg->residue -= nwritten;
+
+			/* If an error occurred, report it and its position */
+			if (nwritten < amount) {
+				curlun->sense_data = SS_WRITE_ERROR;
+				curlun->sense_data_info = file_offset >> curlun->blkbits;
+				curlun->info_valid = 1;
+				break;
+			}
+
+ empty_write:
+			/* Did the host decide to stop early? */
+			if (bh->outreq->actual < bh->bulk_out_intended_length) {
+				fsg->short_packet_received = 1;
+				break;
+			}
+			continue;
+		}
+
+		/* Wait for something to happen */
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+
+	return -EIO;		// No default reply
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_synchronize_cache(struct fsg_dev *fsg)
+{
+	struct fsg_lun	*curlun = fsg->curlun;
+	int		rc;
+
+	/* We ignore the requested LBA and write out all of the
+	 * file's dirty data buffers. */
+	rc = fsg_lun_fsync_sub(curlun);
+	if (rc)
+		curlun->sense_data = SS_WRITE_ERROR;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void invalidate_sub(struct fsg_lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+	struct inode	*inode = filp->f_path.dentry->d_inode;
+	unsigned long	rc;
+
+	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
+	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
+}
+
+static int do_verify(struct fsg_dev *fsg)
+{
+	struct fsg_lun		*curlun = fsg->curlun;
+	u32			lba;
+	u32			verification_length;
+	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
+	loff_t			file_offset, file_offset_tmp;
+	u32			amount_left;
+	unsigned int		amount;
+	ssize_t			nread;
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	lba = get_unaligned_be32(&fsg->cmnd[2]);
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/* We allow DPO (Disable Page Out = don't save data in the
+	 * cache) but we don't implement it. */
+	if ((fsg->cmnd[1] & ~0x10) != 0) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	verification_length = get_unaligned_be16(&fsg->cmnd[7]);
+	if (unlikely(verification_length == 0))
+		return -EIO;		// No default reply
+
+	/* Prepare to carry out the file verify */
+	amount_left = verification_length << curlun->blkbits;
+	file_offset = ((loff_t) lba) << curlun->blkbits;
+
+	/* Write out all the dirty buffers before invalidating them */
+	fsg_lun_fsync_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	invalidate_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	/* Just try to read the requested blocks */
+	while (amount_left > 0) {
+
+		/* Figure out how much we need to read:
+		 * Try to read the remaining amount, but not more than
+		 * the buffer size.
+		 * And don't try to read past the end of the file.
+		 */
+		amount = min((unsigned int) amount_left, mod_data.buflen);
+		amount = min((loff_t) amount,
+				curlun->file_length - file_offset);
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info = file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				(char __user *) bh->buf,
+				amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+				(unsigned long long) file_offset,
+				(int) nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file verify: %d\n",
+					(int) nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file verify: %d/%u\n",
+					(int) nread, amount);
+			nread = round_down(nread, curlun->blksize);
+		}
+		if (nread == 0) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info = file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			break;
+		}
+		file_offset += nread;
+		amount_left -= nread;
+	}
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	u8	*buf = (u8 *) bh->buf;
+
+	static char vendor_id[] = "Linux   ";
+	static char product_disk_id[] = "File-Stor Gadget";
+	static char product_cdrom_id[] = "File-CD Gadget  ";
+
+	if (!fsg->curlun) {		// Unsupported LUNs are okay
+		fsg->bad_lun_okay = 1;
+		memset(buf, 0, 36);
+		buf[0] = 0x7f;		// Unsupported, no device-type
+		buf[4] = 31;		// Additional length
+		return 36;
+	}
+
+	memset(buf, 0, 8);
+	buf[0] = (mod_data.cdrom ? TYPE_ROM : TYPE_DISK);
+	if (mod_data.removable)
+		buf[1] = 0x80;
+	buf[2] = 2;		// ANSI SCSI level 2
+	buf[3] = 2;		// SCSI-2 INQUIRY data format
+	buf[4] = 31;		// Additional length
+				// No special options
+	sprintf(buf + 8, "%-8s%-16s%04x", vendor_id,
+			(mod_data.cdrom ? product_cdrom_id :
+				product_disk_id),
+			mod_data.release);
+	return 36;
+}
+
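+/* Layout of the 36-byte standard INQUIRY data built above:
+ *	byte 0		peripheral device type (TYPE_DISK or TYPE_ROM here;
+ *			0x7f means no device attached to this LUN)
+ *	byte 1		bit 7 = RMB (removable medium)
+ *	byte 2		ANSI version
+ *	byte 3		response data format
+ *	byte 4		additional length = 36 - 5 = 31
+ *	bytes 8-15	vendor id (ASCII, space padded)
+ *	bytes 16-31	product id (ASCII, space padded)
+ *	bytes 32-35	product revision (mod_data.release as four hex digits)
+ */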
+
+static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = fsg->curlun;
+	u8		*buf = (u8 *) bh->buf;
+	u32		sd, sdinfo;
+	int		valid;
+
+	/*
+	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
+	 *
+	 * If a REQUEST SENSE command is received from an initiator
+	 * with a pending unit attention condition (before the target
+	 * generates the contingent allegiance condition), then the
+	 * target shall either:
+	 *   a) report any pending sense data and preserve the unit
+	 *	attention condition on the logical unit, or,
+	 *   b) report the unit attention condition, may discard any
+	 *	pending sense data, and clear the unit attention
+	 *	condition on the logical unit for that initiator.
+	 *
+	 * FSG normally uses option a); enable this code to use option b).
+	 */
+#if 0
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+	}
+#endif
+
+	if (!curlun) {		// Unsupported LUNs are okay
+		fsg->bad_lun_okay = 1;
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+		sdinfo = 0;
+		valid = 0;
+	} else {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+		valid = curlun->info_valid << 7;
+		curlun->sense_data = SS_NO_SENSE;
+		curlun->sense_data_info = 0;
+		curlun->info_valid = 0;
+	}
+
+	memset(buf, 0, 18);
+	buf[0] = valid | 0x70;			// Valid, current error
+	buf[2] = SK(sd);
+	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
+	buf[7] = 18 - 8;			// Additional sense length
+	buf[12] = ASC(sd);
+	buf[13] = ASCQ(sd);
+	return 18;
+}
+
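+/* The 18 bytes built by do_request_sense() follow the fixed-format sense
+ * data layout:
+ *	byte 0		0x70 (current error) OR'ed with the Valid bit (0x80)
+ *	byte 2		sense key (SK)
+ *	bytes 3-6	information -- the LBA saved in sense_data_info
+ *	byte 7		additional sense length = 18 - 8 = 10
+ *	byte 12		additional sense code (ASC)
+ *	byte 13		additional sense code qualifier (ASCQ)
+ * The SS_* constants pack the sense key, ASC and ASCQ into a single value,
+ * which the SK/ASC/ASCQ macros unpack. */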
+
+static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = fsg->curlun;
+	u32		lba = get_unaligned_be32(&fsg->cmnd[2]);
+	int		pmi = fsg->cmnd[8];
+	u8		*buf = (u8 *) bh->buf;
+
+	/* Check the PMI and LBA fields */
+	if (pmi > 1 || (pmi == 0 && lba != 0)) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
+						/* Max logical block */
+	put_unaligned_be32(curlun->blksize, &buf[4]);	/* Block length */
+	return 8;
+}
+
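+/* The READ CAPACITY(10) reply is two big-endian 32-bit values: the LBA of
+ * the last block and the block length in bytes.  As a worked example, a
+ * hypothetical 128 MiB LUN with 262144 blocks of 512 bytes reports a last
+ * LBA of 0x0003ffff, so the 8 reply bytes are 00 03 ff ff 00 00 02 00. */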
+
+static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = fsg->curlun;
+	int		msf = fsg->cmnd[1] & 0x02;
+	u32		lba = get_unaligned_be32(&fsg->cmnd[2]);
+	u8		*buf = (u8 *) bh->buf;
+
+	if ((fsg->cmnd[1] & ~0x02) != 0) {		/* Mask away MSF */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 8);
+	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
+	store_cdrom_address(&buf[4], msf, lba);
+	return 8;
+}
+
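+/* The MSF bit selects minute/second/frame addressing instead of a plain LBA:
+ * CDs run at 75 frames per second and MSF addresses include the 2-second
+ * (150-frame) lead-in, so logical block 0 corresponds to MSF 00:02:00.
+ * store_cdrom_address() formats the address either way. */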
+
+static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = fsg->curlun;
+	int		msf = fsg->cmnd[1] & 0x02;
+	int		start_track = fsg->cmnd[6];
+	u8		*buf = (u8 *) bh->buf;
+
+	if ((fsg->cmnd[1] & ~0x02) != 0 ||		/* Mask away MSF */
+			start_track > 1) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 20);
+	buf[1] = (20-2);		/* TOC data length */
+	buf[2] = 1;			/* First track number */
+	buf[3] = 1;			/* Last track number */
+	buf[5] = 0x16;			/* Data track, copying allowed */
+	buf[6] = 0x01;			/* Only track is number 1 */
+	store_cdrom_address(&buf[8], msf, 0);
+
+	buf[13] = 0x16;			/* Lead-out track is data */
+	buf[14] = 0xAA;			/* Lead-out track number */
+	store_cdrom_address(&buf[16], msf, curlun->num_sectors);
+	return 20;
+}
+
+
+static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = fsg->curlun;
+	int		mscmnd = fsg->cmnd[0];
+	u8		*buf = (u8 *) bh->buf;
+	u8		*buf0 = buf;
+	int		pc, page_code;
+	int		changeable_values, all_pages;
+	int		valid_page = 0;
+	int		len, limit;
+
+	if ((fsg->cmnd[1] & ~0x08) != 0) {		// Mask away DBD
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+	pc = fsg->cmnd[2] >> 6;
+	page_code = fsg->cmnd[2] & 0x3f;
+	if (pc == 3) {
+		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
+		return -EINVAL;
+	}
+	changeable_values = (pc == 1);
+	all_pages = (page_code == 0x3f);
+
+	/* Write the mode parameter header.  Fixed values are: default
+	 * medium type, no cache control (DPOFUA), and no block descriptors.
+	 * The only variable value is the WriteProtect bit.  We will fill in
+	 * the mode data length later. */
+	memset(buf, 0, 8);
+	if (mscmnd == MODE_SENSE) {
+		buf[2] = (curlun->ro ? 0x80 : 0x00);		// WP, DPOFUA
+		buf += 4;
+		limit = 255;
+	} else {			// MODE_SENSE_10
+		buf[3] = (curlun->ro ? 0x80 : 0x00);		// WP, DPOFUA
+		buf += 8;
+		limit = 65535;		// Should really be mod_data.buflen
+	}
+
+	/* No block descriptors */
+
+	/* The mode pages, in numerical order.  The only page we support
+	 * is the Caching page. */
+	if (page_code == 0x08 || all_pages) {
+		valid_page = 1;
+		buf[0] = 0x08;		// Page code
+		buf[1] = 10;		// Page length
+		memset(buf+2, 0, 10);	// None of the fields are changeable
+
+		if (!changeable_values) {
+			buf[2] = 0x04;	// Write cache enable,
+					// Read cache not disabled
+					// No cache retention priorities
+			put_unaligned_be16(0xffff, &buf[4]);
+					/* Don't disable prefetch */
+					/* Minimum prefetch = 0 */
+			put_unaligned_be16(0xffff, &buf[8]);
+					/* Maximum prefetch */
+			put_unaligned_be16(0xffff, &buf[10]);
+					/* Maximum prefetch ceiling */
+		}
+		buf += 12;
+	}
+
+	/* Check that a valid page was requested and the mode data length
+	 * isn't too long. */
+	len = buf - buf0;
+	if (!valid_page || len > limit) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	/*  Store the mode data length */
+	if (mscmnd == MODE_SENSE)
+		buf0[0] = len - 1;
+	else
+		put_unaligned_be16(len - 2, buf0);
+	return len;
+}
+
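+/* Mode parameter header sizes used above: MODE SENSE(6) has a 4-byte header
+ * (data length, medium type, device-specific parameter, block descriptor
+ * length) while MODE SENSE(10) has an 8-byte header with 16-bit data length
+ * and block descriptor length fields.  The write-protect flag is bit 7 of
+ * the device-specific parameter, which is why it lands in buf[2] for the
+ * 6-byte form and buf[3] for the 10-byte form. */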
+
+static int do_start_stop(struct fsg_dev *fsg)
+{
+	struct fsg_lun	*curlun = fsg->curlun;
+	int		loej, start;
+
+	if (!mod_data.removable) {
+		curlun->sense_data = SS_INVALID_COMMAND;
+		return -EINVAL;
+	}
+
+	// int immed = fsg->cmnd[1] & 0x01;
+	loej = fsg->cmnd[4] & 0x02;
+	start = fsg->cmnd[4] & 0x01;
+
+#ifdef CONFIG_USB_FILE_STORAGE_TEST
+	if ((fsg->cmnd[1] & ~0x01) != 0 ||		// Mask away Immed
+			(fsg->cmnd[4] & ~0x03) != 0) {	// Mask LoEj, Start
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	if (!start) {
+
+		/* Are we allowed to unload the media? */
+		if (curlun->prevent_medium_removal) {
+			LDBG(curlun, "unload attempt prevented\n");
+			curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
+			return -EINVAL;
+		}
+		if (loej) {		// Simulate an unload/eject
+			up_read(&fsg->filesem);
+			down_write(&fsg->filesem);
+			fsg_lun_close(curlun);
+			up_write(&fsg->filesem);
+			down_read(&fsg->filesem);
+		}
+	} else {
+
+		/* Our emulation doesn't support mounting; the medium is
+		 * available for use as soon as it is loaded. */
+		if (!fsg_lun_is_open(curlun)) {
+			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+			return -EINVAL;
+		}
+	}
+#endif
+	return 0;
+}
+
+
+static int do_prevent_allow(struct fsg_dev *fsg)
+{
+	struct fsg_lun	*curlun = fsg->curlun;
+	int		prevent;
+
+	if (!mod_data.removable) {
+		curlun->sense_data = SS_INVALID_COMMAND;
+		return -EINVAL;
+	}
+
+	prevent = fsg->cmnd[4] & 0x01;
+	if ((fsg->cmnd[4] & ~0x01) != 0) {		// Mask away Prevent
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	if (curlun->prevent_medium_removal && !prevent)
+		fsg_lun_fsync_sub(curlun);
+	curlun->prevent_medium_removal = prevent;
+	return 0;
+}
+
+
+static int do_read_format_capacities(struct fsg_dev *fsg,
+			struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = fsg->curlun;
+	u8		*buf = (u8 *) bh->buf;
+
+	buf[0] = buf[1] = buf[2] = 0;
+	buf[3] = 8;		// Only the Current/Maximum Capacity Descriptor
+	buf += 4;
+
+	put_unaligned_be32(curlun->num_sectors, &buf[0]);
+						/* Number of blocks */
+	put_unaligned_be32(curlun->blksize, &buf[4]);	/* Block length */
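+	/* In the capacity descriptor the block length occupies only the low
+	 * three bytes (buf[5..7]); buf[4] holds the descriptor code, so the
+	 * 0x02 ("formatted media") stored below intentionally overwrites the
+	 * high byte written by put_unaligned_be32() above. */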
+	buf[4] = 0x02;				/* Current capacity */
+	return 12;
+}
+
+
+static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = fsg->curlun;
+
+	/* We don't support MODE SELECT */
+	curlun->sense_data = SS_INVALID_COMMAND;
+	return -EINVAL;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
+{
+	int	rc;
+
+	rc = fsg_set_halt(fsg, fsg->bulk_in);
+	if (rc == -EAGAIN)
+		VDBG(fsg, "delayed bulk-in endpoint halt\n");
+	while (rc != 0) {
+		if (rc != -EAGAIN) {
+			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
+			rc = 0;
+			break;
+		}
+
+		/* Wait for a short time and then try again */
+		if (msleep_interruptible(100) != 0)
+			return -EINTR;
+		rc = usb_ep_set_halt(fsg->bulk_in);
+	}
+	return rc;
+}
+
+static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
+{
+	int	rc;
+
+	DBG(fsg, "bulk-in set wedge\n");
+	rc = usb_ep_set_wedge(fsg->bulk_in);
+	if (rc == -EAGAIN)
+		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
+	while (rc != 0) {
+		if (rc != -EAGAIN) {
+			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
+			rc = 0;
+			break;
+		}
+
+		/* Wait for a short time and then try again */
+		if (msleep_interruptible(100) != 0)
+			return -EINTR;
+		rc = usb_ep_set_wedge(fsg->bulk_in);
+	}
+	return rc;
+}
+
+static int throw_away_data(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	u32			amount;
+	int			rc;
+
+	while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
+			fsg->usb_amount_left > 0) {
+
+		/* Throw away the data in a filled buffer */
+		if (bh->state == BUF_STATE_FULL) {
+			smp_rmb();
+			bh->state = BUF_STATE_EMPTY;
+			fsg->next_buffhd_to_drain = bh->next;
+
+			/* A short packet or an error ends everything */
+			if (bh->outreq->actual < bh->bulk_out_intended_length ||
+					bh->outreq->status != 0) {
+				raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
+				return -EINTR;
+			}
+			continue;
+		}
+
+		/* Try to submit another request if we need one */
+		bh = fsg->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
+			amount = min(fsg->usb_amount_left,
+					(u32) mod_data.buflen);
+
+			/* Except at the end of the transfer, amount will be
+			 * equal to the buffer size, which is divisible by
+			 * the bulk-out maxpacket size.
+			 */
+			set_bulk_out_req_length(fsg, bh, amount);
+			start_transfer(fsg, fsg->bulk_out, bh->outreq,
+					&bh->outreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+			fsg->usb_amount_left -= amount;
+			continue;
+		}
+
+		/* Otherwise wait for something to happen */
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+
+static int finish_reply(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
+	int			rc = 0;
+
+	switch (fsg->data_dir) {
+	case DATA_DIR_NONE:
+		break;			// Nothing to send
+
+	/* If we don't know whether the host wants to read or write,
+	 * this must be CB or CBI with an unknown command.  We mustn't
+	 * try to send or receive any data.  So stall both bulk pipes
+	 * if we can and wait for a reset. */
+	case DATA_DIR_UNKNOWN:
+		if (mod_data.can_stall) {
+			fsg_set_halt(fsg, fsg->bulk_out);
+			rc = halt_bulk_in_endpoint(fsg);
+		}
+		break;
+
+	/* All but the last buffer of data must have already been sent */
+	case DATA_DIR_TO_HOST:
+		if (fsg->data_size == 0)
+			;		// Nothing to send
+
+		/* If there's no residue, simply send the last buffer */
+		else if (fsg->residue == 0) {
+			bh->inreq->zero = 0;
+			start_transfer(fsg, fsg->bulk_in, bh->inreq,
+					&bh->inreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+		}
+
+		/* There is a residue.  For CB and CBI, simply mark the end
+		 * of the data with a short packet.  However, if we are
+		 * allowed to stall, there was no data at all (residue ==
+		 * data_size), and the command failed (invalid LUN or
+		 * sense data is set), then halt the bulk-in endpoint
+		 * instead. */
+		else if (!transport_is_bbb()) {
+			if (mod_data.can_stall &&
+					fsg->residue == fsg->data_size &&
+	(!fsg->curlun || fsg->curlun->sense_data != SS_NO_SENSE)) {
+				bh->state = BUF_STATE_EMPTY;
+				rc = halt_bulk_in_endpoint(fsg);
+			} else {
+				bh->inreq->zero = 1;
+				start_transfer(fsg, fsg->bulk_in, bh->inreq,
+						&bh->inreq_busy, &bh->state);
+				fsg->next_buffhd_to_fill = bh->next;
+			}
+		}
+
+		/*
+		 * For Bulk-only, mark the end of the data with a short
+		 * packet.  If we are allowed to stall, halt the bulk-in
+		 * endpoint.  (Note: This violates the Bulk-Only Transport
+		 * specification, which requires us to pad the data if we
+		 * don't halt the endpoint.  Presumably nobody will mind.)
+		 */
+		else {
+			bh->inreq->zero = 1;
+			start_transfer(fsg, fsg->bulk_in, bh->inreq,
+					&bh->inreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+			if (mod_data.can_stall)
+				rc = halt_bulk_in_endpoint(fsg);
+		}
+		break;
+
+	/* We have processed all we want from the data the host has sent.
+	 * There may still be outstanding bulk-out requests. */
+	case DATA_DIR_FROM_HOST:
+		if (fsg->residue == 0)
+			;		// Nothing to receive
+
+		/* Did the host stop sending unexpectedly early? */
+		else if (fsg->short_packet_received) {
+			raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
+			rc = -EINTR;
+		}
+
+		/* We haven't processed all the incoming data.  Even though
+		 * we may be allowed to stall, doing so would cause a race.
+		 * The controller may already have ACK'ed all the remaining
+		 * bulk-out packets, in which case the host wouldn't see a
+		 * STALL.  Not realizing the endpoint was halted, it wouldn't
+		 * clear the halt -- leading to problems later on. */
+#if 0
+		else if (mod_data.can_stall) {
+			fsg_set_halt(fsg, fsg->bulk_out);
+			raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
+			rc = -EINTR;
+		}
+#endif
+
+		/* We can't stall.  Read in the excess data and throw it
+		 * all away. */
+		else
+			rc = throw_away_data(fsg);
+		break;
+	}
+	return rc;
+}
+
+
+static int send_status(struct fsg_dev *fsg)
+{
+	struct fsg_lun		*curlun = fsg->curlun;
+	struct fsg_buffhd	*bh;
+	int			rc;
+	u8			status = US_BULK_STAT_OK;
+	u32			sd, sdinfo = 0;
+
+	/* Wait for the next buffer to become available */
+	bh = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+
+	if (curlun) {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+	} else if (fsg->bad_lun_okay)
+		sd = SS_NO_SENSE;
+	else
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+
+	if (fsg->phase_error) {
+		DBG(fsg, "sending phase-error status\n");
+		status = US_BULK_STAT_PHASE;
+		sd = SS_INVALID_COMMAND;
+	} else if (sd != SS_NO_SENSE) {
+		DBG(fsg, "sending command-failure status\n");
+		status = US_BULK_STAT_FAIL;
+		VDBG(fsg, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
+				"  info x%x\n",
+				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
+	}
+
+	if (transport_is_bbb()) {
+		struct bulk_cs_wrap	*csw = bh->buf;
+
+		/* Store and send the Bulk-only CSW */
+		csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
+		csw->Tag = fsg->tag;
+		csw->Residue = cpu_to_le32(fsg->residue);
+		csw->Status = status;
+
+		bh->inreq->length = US_BULK_CS_WRAP_LEN;
+		bh->inreq->zero = 0;
+		start_transfer(fsg, fsg->bulk_in, bh->inreq,
+				&bh->inreq_busy, &bh->state);
+
+	} else if (mod_data.transport_type == USB_PR_CB) {
+
+		/* Control-Bulk transport has no status phase! */
+		return 0;
+
+	} else {			// USB_PR_CBI
+		struct interrupt_data	*buf = bh->buf;
+
+		/* Store and send the Interrupt data.  UFI sends the ASC
+		 * and ASCQ bytes.  Everything else sends a Type (which
+		 * is always 0) and the status Value. */
+		if (mod_data.protocol_type == USB_SC_UFI) {
+			buf->bType = ASC(sd);
+			buf->bValue = ASCQ(sd);
+		} else {
+			buf->bType = 0;
+			buf->bValue = status;
+		}
+		fsg->intreq->length = CBI_INTERRUPT_DATA_LEN;
+
+		fsg->intr_buffhd = bh;		// Point to the right buffhd
+		fsg->intreq->buf = bh->inreq->buf;
+		fsg->intreq->context = bh;
+		start_transfer(fsg, fsg->intr_in, fsg->intreq,
+				&fsg->intreq_busy, &bh->state);
+	}
+
+	fsg->next_buffhd_to_fill = bh->next;
+	return 0;
+}
+
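+/* The Bulk-only CSW sent above is 13 bytes on the wire: the 'USBS' signature
+ * (0x53425355, little-endian), the tag echoed from the matching CBW, the data
+ * residue, and a single status byte -- 0x00 passed, 0x01 failed, 0x02 phase
+ * error, i.e. US_BULK_STAT_OK/FAIL/PHASE. */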
+
+/*-------------------------------------------------------------------------*/
+
+/* Check whether the command is properly formed and whether its data size
+ * and direction agree with the values we already have. */
+static int check_command(struct fsg_dev *fsg, int cmnd_size,
+		enum data_direction data_dir, unsigned int mask,
+		int needs_medium, const char *name)
+{
+	int			i;
+	int			lun = fsg->cmnd[1] >> 5;
+	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
+	char			hdlen[20];
+	struct fsg_lun		*curlun;
+
+	/* Adjust the expected cmnd_size for protocol encapsulation padding.
+	 * Transparent SCSI doesn't pad. */
+	if (protocol_is_scsi())
+		;
+
+	/* There's some disagreement as to whether RBC pads commands or not.
+	 * We'll play it safe and accept either form. */
+	else if (mod_data.protocol_type == USB_SC_RBC) {
+		if (fsg->cmnd_size == 12)
+			cmnd_size = 12;
+
+	/* All the other protocols pad to 12 bytes */
+	} else
+		cmnd_size = 12;
+
+	hdlen[0] = 0;
+	if (fsg->data_dir != DATA_DIR_UNKNOWN)
+		sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
+				fsg->data_size);
+	VDBG(fsg, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
+			name, cmnd_size, dirletter[(int) data_dir],
+			fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
+
+	/* We can't reply at all until we know the correct data direction
+	 * and size. */
+	if (fsg->data_size_from_cmnd == 0)
+		data_dir = DATA_DIR_NONE;
+	if (fsg->data_dir == DATA_DIR_UNKNOWN) {	// CB or CBI
+		fsg->data_dir = data_dir;
+		fsg->data_size = fsg->data_size_from_cmnd;
+
+	} else {					// Bulk-only
+		if (fsg->data_size < fsg->data_size_from_cmnd) {
+
+			/* Host data size < Device data size is a phase error.
+			 * Carry out the command, but only transfer as much
+			 * as we are allowed. */
+			fsg->data_size_from_cmnd = fsg->data_size;
+			fsg->phase_error = 1;
+		}
+	}
+	fsg->residue = fsg->usb_amount_left = fsg->data_size;
+
+	/* Conflicting data directions are a phase error */
+	if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
+		fsg->phase_error = 1;
+		return -EINVAL;
+	}
+
+	/* Verify the length of the command itself */
+	if (cmnd_size != fsg->cmnd_size) {
+
+		/* Special case workaround: There are plenty of buggy SCSI
+		 * implementations. Many have issues with the cbw->Length
+		 * field passing a wrong command size. For those cases we
+		 * always try to work around the problem by using the length
+		 * sent by the host side provided it is at least as large
+		 * as the correct command length.
+		 * Examples of such cases would be MS-Windows, which issues
+		 * REQUEST SENSE with cbw->Length == 12 where it should
+		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
+		 * REQUEST SENSE with cbw->Length == 10 where it should
+		 * be 6 as well.
+		 */
+		if (cmnd_size <= fsg->cmnd_size) {
+			DBG(fsg, "%s is buggy! Expected length %d "
+					"but we got %d\n", name,
+					cmnd_size, fsg->cmnd_size);
+			cmnd_size = fsg->cmnd_size;
+		} else {
+			fsg->phase_error = 1;
+			return -EINVAL;
+		}
+	}
+
+	/* Check that the LUN values are consistent */
+	if (transport_is_bbb()) {
+		if (fsg->lun != lun)
+			DBG(fsg, "using LUN %d from CBW, "
+					"not LUN %d from CDB\n",
+					fsg->lun, lun);
+	}
+
+	/* Check the LUN */
+	curlun = fsg->curlun;
+	if (curlun) {
+		if (fsg->cmnd[0] != REQUEST_SENSE) {
+			curlun->sense_data = SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+	} else {
+		fsg->bad_lun_okay = 0;
+
+		/* INQUIRY and REQUEST SENSE commands are explicitly allowed
+		 * to use unsupported LUNs; all others may not. */
+		if (fsg->cmnd[0] != INQUIRY &&
+				fsg->cmnd[0] != REQUEST_SENSE) {
+			DBG(fsg, "unsupported LUN %d\n", fsg->lun);
+			return -EINVAL;
+		}
+	}
+
+	/* If a unit attention condition exists, only INQUIRY and
+	 * REQUEST SENSE commands are allowed; anything else must fail. */
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
+			fsg->cmnd[0] != INQUIRY &&
+			fsg->cmnd[0] != REQUEST_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+		return -EINVAL;
+	}
+
+	/* Check that only command bytes listed in the mask are non-zero */
+	fsg->cmnd[1] &= 0x1f;			// Mask away the LUN
+	for (i = 1; i < cmnd_size; ++i) {
+		if (fsg->cmnd[i] && !(mask & (1 << i))) {
+			if (curlun)
+				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+	}
+
+	/* If the medium isn't mounted and the command needs to access
+	 * it, return an error. */
+	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
+		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
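+/* As a worked example of the "mask" argument: do_scsi_command() passes
+ * (1<<1) | (0xf<<2) | (3<<7) for READ(10), which allows non-zero values in
+ * byte 1 (DPO/FUA flags), bytes 2-5 (the LBA) and bytes 7-8 (the transfer
+ * length); bytes 6 and 9 must be zero or the command is rejected with
+ * SS_INVALID_FIELD_IN_CDB.  Byte 0, the opcode, is never checked. */
+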
+/* wrapper of check_command for data size in blocks handling */
+static int check_command_size_in_blocks(struct fsg_dev *fsg, int cmnd_size,
+		enum data_direction data_dir, unsigned int mask,
+		int needs_medium, const char *name)
+{
+	if (fsg->curlun)
+		fsg->data_size_from_cmnd <<= fsg->curlun->blkbits;
+	return check_command(fsg, cmnd_size, data_dir,
+			mask, needs_medium, name);
+}
+
+static int do_scsi_command(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	int			rc;
+	int			reply = -EINVAL;
+	int			i;
+	static char		unknown[16];
+
+	dump_cdb(fsg);
+
+	/* Wait for the next buffer to become available for data or status */
+	bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+	fsg->phase_error = 0;
+	fsg->short_packet_received = 0;
+
+	down_read(&fsg->filesem);	// We're using the backing file
+	switch (fsg->cmnd[0]) {
+
+	case INQUIRY:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<4), 0,
+				"INQUIRY")) == 0)
+			reply = do_inquiry(fsg, bh);
+		break;
+
+	case MODE_SELECT:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
+				(1<<1) | (1<<4), 0,
+				"MODE SELECT(6)")) == 0)
+			reply = do_mode_select(fsg, bh);
+		break;
+
+	case MODE_SELECT_10:
+		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
+				(1<<1) | (3<<7), 0,
+				"MODE SELECT(10)")) == 0)
+			reply = do_mode_select(fsg, bh);
+		break;
+
+	case MODE_SENSE:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<1) | (1<<2) | (1<<4), 0,
+				"MODE SENSE(6)")) == 0)
+			reply = do_mode_sense(fsg, bh);
+		break;
+
+	case MODE_SENSE_10:
+		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(1<<1) | (1<<2) | (3<<7), 0,
+				"MODE SENSE(10)")) == 0)
+			reply = do_mode_sense(fsg, bh);
+		break;
+
+	case ALLOW_MEDIUM_REMOVAL:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
+				(1<<4), 0,
+				"PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
+			reply = do_prevent_allow(fsg);
+		break;
+
+	case READ_6:
+		i = fsg->cmnd[4];
+		fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
+		if ((reply = check_command_size_in_blocks(fsg, 6,
+				DATA_DIR_TO_HOST,
+				(7<<1) | (1<<4), 1,
+				"READ(6)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case READ_10:
+		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
+		if ((reply = check_command_size_in_blocks(fsg, 10,
+				DATA_DIR_TO_HOST,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"READ(10)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case READ_12:
+		fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
+		if ((reply = check_command_size_in_blocks(fsg, 12,
+				DATA_DIR_TO_HOST,
+				(1<<1) | (0xf<<2) | (0xf<<6), 1,
+				"READ(12)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case READ_CAPACITY:
+		fsg->data_size_from_cmnd = 8;
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(0xf<<2) | (1<<8), 1,
+				"READ CAPACITY")) == 0)
+			reply = do_read_capacity(fsg, bh);
+		break;
+
+	case READ_HEADER:
+		if (!mod_data.cdrom)
+			goto unknown_cmnd;
+		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(3<<7) | (0x1f<<1), 1,
+				"READ HEADER")) == 0)
+			reply = do_read_header(fsg, bh);
+		break;
+
+	case READ_TOC:
+		if (!mod_data.cdrom)
+			goto unknown_cmnd;
+		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(7<<6) | (1<<1), 1,
+				"READ TOC")) == 0)
+			reply = do_read_toc(fsg, bh);
+		break;
+
+	case READ_FORMAT_CAPACITIES:
+		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(3<<7), 1,
+				"READ FORMAT CAPACITIES")) == 0)
+			reply = do_read_format_capacities(fsg, bh);
+		break;
+
+	case REQUEST_SENSE:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<4), 0,
+				"REQUEST SENSE")) == 0)
+			reply = do_request_sense(fsg, bh);
+		break;
+
+	case START_STOP:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
+				(1<<1) | (1<<4), 0,
+				"START-STOP UNIT")) == 0)
+			reply = do_start_stop(fsg);
+		break;
+
+	case SYNCHRONIZE_CACHE:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
+				(0xf<<2) | (3<<7), 1,
+				"SYNCHRONIZE CACHE")) == 0)
+			reply = do_synchronize_cache(fsg);
+		break;
+
+	case TEST_UNIT_READY:
+		fsg->data_size_from_cmnd = 0;
+		reply = check_command(fsg, 6, DATA_DIR_NONE,
+				0, 1,
+				"TEST UNIT READY");
+		break;
+
+	/* Although optional, this command is used by MS-Windows.  We
+	 * support a minimal version: BytChk must be 0. */
+	case VERIFY:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"VERIFY")) == 0)
+			reply = do_verify(fsg);
+		break;
+
+	case WRITE_6:
+		i = fsg->cmnd[4];
+		fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
+		if ((reply = check_command_size_in_blocks(fsg, 6,
+				DATA_DIR_FROM_HOST,
+				(7<<1) | (1<<4), 1,
+				"WRITE(6)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	case WRITE_10:
+		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
+		if ((reply = check_command_size_in_blocks(fsg, 10,
+				DATA_DIR_FROM_HOST,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"WRITE(10)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	case WRITE_12:
+		fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
+		if ((reply = check_command_size_in_blocks(fsg, 12,
+				DATA_DIR_FROM_HOST,
+				(1<<1) | (0xf<<2) | (0xf<<6), 1,
+				"WRITE(12)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	/* Some mandatory commands that we recognize but don't implement.
+	 * They don't mean much in this setting.  It's left as an exercise
+	 * for anyone interested to implement RESERVE and RELEASE in terms
+	 * of Posix locks. */
+	case FORMAT_UNIT:
+	case RELEASE:
+	case RESERVE:
+	case SEND_DIAGNOSTIC:
+		// Fall through
+
+	default:
+ unknown_cmnd:
+		fsg->data_size_from_cmnd = 0;
+		sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
+		if ((reply = check_command(fsg, fsg->cmnd_size,
+				DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) {
+			fsg->curlun->sense_data = SS_INVALID_COMMAND;
+			reply = -EINVAL;
+		}
+		break;
+	}
+	up_read(&fsg->filesem);
+
+	if (reply == -EINTR || signal_pending(current))
+		return -EINTR;
+
+	/* Set up the single reply buffer for finish_reply() */
+	if (reply == -EINVAL)
+		reply = 0;		// Error reply length
+	if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
+		reply = min((u32) reply, fsg->data_size_from_cmnd);
+		bh->inreq->length = reply;
+		bh->state = BUF_STATE_FULL;
+		fsg->residue -= reply;
+	}				// Otherwise it's already set
+
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct usb_request		*req = bh->outreq;
+	struct bulk_cb_wrap	*cbw = req->buf;
+
+	/* Was this a real packet?  Should it be ignored? */
+	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
+		return -EINVAL;
+
+	/* Is the CBW valid? */
+	if (req->actual != US_BULK_CB_WRAP_LEN ||
+			cbw->Signature != cpu_to_le32(
+				US_BULK_CB_SIGN)) {
+		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
+				req->actual,
+				le32_to_cpu(cbw->Signature));
+
+		/* The Bulk-only spec says we MUST stall the IN endpoint
+		 * (6.6.1), so it's unavoidable.  It also says we must
+		 * retain this state until the next reset, but there's
+		 * no way to tell the controller driver it should ignore
+		 * Clear-Feature(HALT) requests.
+		 *
+		 * We aren't required to halt the OUT endpoint; instead
+		 * we can simply accept and discard any data received
+		 * until the next reset. */
+		wedge_bulk_in_endpoint(fsg);
+		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
+		return -EINVAL;
+	}
+
+	/* Is the CBW meaningful? */
+	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN ||
+			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
+		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
+				"cmdlen %u\n",
+				cbw->Lun, cbw->Flags, cbw->Length);
+
+		/* We can do anything we want here, so let's stall the
+		 * bulk pipes if we are allowed to. */
+		if (mod_data.can_stall) {
+			fsg_set_halt(fsg, fsg->bulk_out);
+			halt_bulk_in_endpoint(fsg);
+		}
+		return -EINVAL;
+	}
+
+	/* Save the command for later */
+	fsg->cmnd_size = cbw->Length;
+	memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
+	if (cbw->Flags & US_BULK_FLAG_IN)
+		fsg->data_dir = DATA_DIR_TO_HOST;
+	else
+		fsg->data_dir = DATA_DIR_FROM_HOST;
+	fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
+	if (fsg->data_size == 0)
+		fsg->data_dir = DATA_DIR_NONE;
+	fsg->lun = cbw->Lun;
+	fsg->tag = cbw->Tag;
+	return 0;
+}
+
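+/* For reference, the 31-byte CBW validated above is laid out as: a 4-byte
+ * 'USBC' signature (0x43425355, little-endian), a 4-byte tag chosen by the
+ * host, a 4-byte data transfer length, one flags byte (bit 7 set means
+ * device-to-host), the LUN, the CDB length (1-16) and 16 bytes of CDB --
+ * hence the Signature, Tag, DataTransferLength, Flags, Lun, Length and CDB
+ * fields used here. */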
+
+static int get_next_command(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	int			rc = 0;
+
+	if (transport_is_bbb()) {
+
+		/* Wait for the next buffer to become available */
+		bh = fsg->next_buffhd_to_fill;
+		while (bh->state != BUF_STATE_EMPTY) {
+			rc = sleep_thread(fsg);
+			if (rc)
+				return rc;
+		}
+
+		/* Queue a request to read a Bulk-only CBW */
+		set_bulk_out_req_length(fsg, bh, US_BULK_CB_WRAP_LEN);
+		start_transfer(fsg, fsg->bulk_out, bh->outreq,
+				&bh->outreq_busy, &bh->state);
+
+		/* We will drain the buffer in software, which means we
+		 * can reuse it for the next filling.  No need to advance
+		 * next_buffhd_to_fill. */
+
+		/* Wait for the CBW to arrive */
+		while (bh->state != BUF_STATE_FULL) {
+			rc = sleep_thread(fsg);
+			if (rc)
+				return rc;
+		}
+		smp_rmb();
+		rc = received_cbw(fsg, bh);
+		bh->state = BUF_STATE_EMPTY;
+
+	} else {		// USB_PR_CB or USB_PR_CBI
+
+		/* Wait for the next command to arrive */
+		while (fsg->cbbuf_cmnd_size == 0) {
+			rc = sleep_thread(fsg);
+			if (rc)
+				return rc;
+		}
+
+		/* Is the previous status interrupt request still busy?
+		 * The host is allowed to skip reading the status,
+		 * so we must cancel it. */
+		if (fsg->intreq_busy)
+			usb_ep_dequeue(fsg->intr_in, fsg->intreq);
+
+		/* Copy the command and mark the buffer empty */
+		fsg->data_dir = DATA_DIR_UNKNOWN;
+		spin_lock_irq(&fsg->lock);
+		fsg->cmnd_size = fsg->cbbuf_cmnd_size;
+		memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
+		fsg->cbbuf_cmnd_size = 0;
+		spin_unlock_irq(&fsg->lock);
+
+		/* Use LUN from the command */
+		fsg->lun = fsg->cmnd[1] >> 5;
+	}
+
+	/* Update current lun */
+	if (fsg->lun >= 0 && fsg->lun < fsg->nluns)
+		fsg->curlun = &fsg->luns[fsg->lun];
+	else
+		fsg->curlun = NULL;
+
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
+		const struct usb_endpoint_descriptor *d)
+{
+	int	rc;
+
+	ep->driver_data = fsg;
+	ep->desc = d;
+	rc = usb_ep_enable(ep);
+	if (rc)
+		ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
+	return rc;
+}
+
+static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
+		struct usb_request **preq)
+{
+	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
+	if (*preq)
+		return 0;
+	ERROR(fsg, "can't allocate request for %s\n", ep->name);
+	return -ENOMEM;
+}
+
+/*
+ * Reset interface setting and re-init endpoint state (toggle etc).
+ * Call with altsetting < 0 to disable the interface.  The only other
+ * available altsetting is 0, which enables the interface.
+ */
+static int do_set_interface(struct fsg_dev *fsg, int altsetting)
+{
+	int	rc = 0;
+	int	i;
+	const struct usb_endpoint_descriptor	*d;
+
+	if (fsg->running)
+		DBG(fsg, "reset interface\n");
+
+reset:
+	/* Deallocate the requests */
+	for (i = 0; i < fsg_num_buffers; ++i) {
+		struct fsg_buffhd *bh = &fsg->buffhds[i];
+
+		if (bh->inreq) {
+			usb_ep_free_request(fsg->bulk_in, bh->inreq);
+			bh->inreq = NULL;
+		}
+		if (bh->outreq) {
+			usb_ep_free_request(fsg->bulk_out, bh->outreq);
+			bh->outreq = NULL;
+		}
+	}
+	if (fsg->intreq) {
+		usb_ep_free_request(fsg->intr_in, fsg->intreq);
+		fsg->intreq = NULL;
+	}
+
+	/* Disable the endpoints */
+	if (fsg->bulk_in_enabled) {
+		usb_ep_disable(fsg->bulk_in);
+		fsg->bulk_in_enabled = 0;
+	}
+	if (fsg->bulk_out_enabled) {
+		usb_ep_disable(fsg->bulk_out);
+		fsg->bulk_out_enabled = 0;
+	}
+	if (fsg->intr_in_enabled) {
+		usb_ep_disable(fsg->intr_in);
+		fsg->intr_in_enabled = 0;
+	}
+
+	fsg->running = 0;
+	if (altsetting < 0 || rc != 0)
+		return rc;
+
+	DBG(fsg, "set interface %d\n", altsetting);
+
+	/* Enable the endpoints */
+	d = fsg_ep_desc(fsg->gadget,
+			&fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc,
+			&fsg_ss_bulk_in_desc);
+	if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
+		goto reset;
+	fsg->bulk_in_enabled = 1;
+
+	d = fsg_ep_desc(fsg->gadget,
+			&fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc,
+			&fsg_ss_bulk_out_desc);
+	if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
+		goto reset;
+	fsg->bulk_out_enabled = 1;
+	fsg->bulk_out_maxpacket = usb_endpoint_maxp(d);
+	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
+
+	if (transport_is_cbi()) {
+		d = fsg_ep_desc(fsg->gadget,
+				&fsg_fs_intr_in_desc, &fsg_hs_intr_in_desc,
+				&fsg_ss_intr_in_desc);
+		if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
+			goto reset;
+		fsg->intr_in_enabled = 1;
+	}
+
+	/* Allocate the requests */
+	for (i = 0; i < fsg_num_buffers; ++i) {
+		struct fsg_buffhd	*bh = &fsg->buffhds[i];
+
+		if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
+			goto reset;
+		if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
+			goto reset;
+		bh->inreq->buf = bh->outreq->buf = bh->buf;
+		bh->inreq->context = bh->outreq->context = bh;
+		bh->inreq->complete = bulk_in_complete;
+		bh->outreq->complete = bulk_out_complete;
+	}
+	if (transport_is_cbi()) {
+		if ((rc = alloc_request(fsg, fsg->intr_in, &fsg->intreq)) != 0)
+			goto reset;
+		fsg->intreq->complete = intr_in_complete;
+	}
+
+	fsg->running = 1;
+	for (i = 0; i < fsg->nluns; ++i)
+		fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
+	return rc;
+}
+
+
+/*
+ * Change our operational configuration.  This code must agree with the code
+ * that returns config descriptors, and with interface altsetting code.
+ *
+ * It's also responsible for power management interactions.  Some
+ * configurations might not work with our current power sources.
+ * For now we just assume the gadget is always self-powered.
+ */
+static int do_set_config(struct fsg_dev *fsg, u8 new_config)
+{
+	int	rc = 0;
+
+	/* Disable the single interface */
+	if (fsg->config != 0) {
+		DBG(fsg, "reset config\n");
+		fsg->config = 0;
+		rc = do_set_interface(fsg, -1);
+	}
+
+	/* Enable the interface */
+	if (new_config != 0) {
+		fsg->config = new_config;
+		if ((rc = do_set_interface(fsg, 0)) != 0)
+			fsg->config = 0;	// Reset on errors
+		else
+			INFO(fsg, "%s config #%d\n",
+			     usb_speed_string(fsg->gadget->speed),
+			     fsg->config);
+	}
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void handle_exception(struct fsg_dev *fsg)
+{
+	siginfo_t		info;
+	int			sig;
+	int			i;
+	int			num_active;
+	struct fsg_buffhd	*bh;
+	enum fsg_state		old_state;
+	u8			new_config;
+	struct fsg_lun		*curlun;
+	unsigned int		exception_req_tag;
+	int			rc;
+
+	/* Clear the existing signals.  Anything but SIGUSR1 is converted
+	 * into a high-priority EXIT exception. */
+	for (;;) {
+		sig = dequeue_signal_lock(current, &current->blocked, &info);
+		if (!sig)
+			break;
+		if (sig != SIGUSR1) {
+			if (fsg->state < FSG_STATE_EXIT)
+				DBG(fsg, "Main thread exiting on signal\n");
+			raise_exception(fsg, FSG_STATE_EXIT);
+		}
+	}
+
+	/* Cancel all the pending transfers */
+	if (fsg->intreq_busy)
+		usb_ep_dequeue(fsg->intr_in, fsg->intreq);
+	for (i = 0; i < fsg_num_buffers; ++i) {
+		bh = &fsg->buffhds[i];
+		if (bh->inreq_busy)
+			usb_ep_dequeue(fsg->bulk_in, bh->inreq);
+		if (bh->outreq_busy)
+			usb_ep_dequeue(fsg->bulk_out, bh->outreq);
+	}
+
+	/* Wait until everything is idle */
+	for (;;) {
+		num_active = fsg->intreq_busy;
+		for (i = 0; i < fsg_num_buffers; ++i) {
+			bh = &fsg->buffhds[i];
+			num_active += bh->inreq_busy + bh->outreq_busy;
+		}
+		if (num_active == 0)
+			break;
+		if (sleep_thread(fsg))
+			return;
+	}
+
+	/* Clear out the controller's fifos */
+	if (fsg->bulk_in_enabled)
+		usb_ep_fifo_flush(fsg->bulk_in);
+	if (fsg->bulk_out_enabled)
+		usb_ep_fifo_flush(fsg->bulk_out);
+	if (fsg->intr_in_enabled)
+		usb_ep_fifo_flush(fsg->intr_in);
+
+	/* Reset the I/O buffer states and pointers, the SCSI
+	 * state, and the exception.  Then invoke the handler. */
+	spin_lock_irq(&fsg->lock);
+
+	for (i = 0; i < fsg_num_buffers; ++i) {
+		bh = &fsg->buffhds[i];
+		bh->state = BUF_STATE_EMPTY;
+	}
+	fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
+			&fsg->buffhds[0];
+
+	exception_req_tag = fsg->exception_req_tag;
+	new_config = fsg->new_config;
+	old_state = fsg->state;
+
+	if (old_state == FSG_STATE_ABORT_BULK_OUT)
+		fsg->state = FSG_STATE_STATUS_PHASE;
+	else {
+		for (i = 0; i < fsg->nluns; ++i) {
+			curlun = &fsg->luns[i];
+			curlun->prevent_medium_removal = 0;
+			curlun->sense_data = curlun->unit_attention_data =
+					SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+		fsg->state = FSG_STATE_IDLE;
+	}
+	spin_unlock_irq(&fsg->lock);
+
+	/* Carry out any extra actions required for the exception */
+	switch (old_state) {
+	default:
+		break;
+
+	case FSG_STATE_ABORT_BULK_OUT:
+		send_status(fsg);
+		spin_lock_irq(&fsg->lock);
+		if (fsg->state == FSG_STATE_STATUS_PHASE)
+			fsg->state = FSG_STATE_IDLE;
+		spin_unlock_irq(&fsg->lock);
+		break;
+
+	case FSG_STATE_RESET:
+		/* In case we were forced against our will to halt a
+		 * bulk endpoint, clear the halt now.  (The SuperH UDC
+		 * requires this.) */
+		if (test_and_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
+			usb_ep_clear_halt(fsg->bulk_in);
+
+		if (transport_is_bbb()) {
+			if (fsg->ep0_req_tag == exception_req_tag)
+				ep0_queue(fsg);	// Complete the status stage
+
+		} else if (transport_is_cbi())
+			send_status(fsg);	// Status by interrupt pipe
+
+		/* Technically this should go here, but it would only be
+		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
+		 * CONFIG_CHANGE cases. */
+		// for (i = 0; i < fsg->nluns; ++i)
+		//	fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
+		break;
+
+	case FSG_STATE_INTERFACE_CHANGE:
+		rc = do_set_interface(fsg, 0);
+		if (fsg->ep0_req_tag != exception_req_tag)
+			break;
+		if (rc != 0)			// STALL on errors
+			fsg_set_halt(fsg, fsg->ep0);
+		else				// Complete the status stage
+			ep0_queue(fsg);
+		break;
+
+	case FSG_STATE_CONFIG_CHANGE:
+		rc = do_set_config(fsg, new_config);
+		if (fsg->ep0_req_tag != exception_req_tag)
+			break;
+		if (rc != 0)			// STALL on errors
+			fsg_set_halt(fsg, fsg->ep0);
+		else				// Complete the status stage
+			ep0_queue(fsg);
+		break;
+
+	case FSG_STATE_DISCONNECT:
+		for (i = 0; i < fsg->nluns; ++i)
+			fsg_lun_fsync_sub(fsg->luns + i);
+		do_set_config(fsg, 0);		// Unconfigured state
+		break;
+
+	case FSG_STATE_EXIT:
+	case FSG_STATE_TERMINATED:
+		do_set_config(fsg, 0);			// Free resources
+		spin_lock_irq(&fsg->lock);
+		fsg->state = FSG_STATE_TERMINATED;	// Stop the thread
+		spin_unlock_irq(&fsg->lock);
+		break;
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int fsg_main_thread(void *fsg_)
+{
+	struct fsg_dev		*fsg = fsg_;
+
+	/* Allow the thread to be killed by a signal, but set the signal mask
+	 * to block everything but INT, TERM, KILL, and USR1. */
+	allow_signal(SIGINT);
+	allow_signal(SIGTERM);
+	allow_signal(SIGKILL);
+	allow_signal(SIGUSR1);
+
+	/* Allow the thread to be frozen */
+	set_freezable();
+
+	/* Arrange for userspace references to be interpreted as kernel
+	 * pointers.  That way we can pass a kernel pointer to a routine
+	 * that expects a __user pointer and it will work okay. */
+	set_fs(get_ds());
+
+	/* The main loop */
+	while (fsg->state != FSG_STATE_TERMINATED) {
+		if (exception_in_progress(fsg) || signal_pending(current)) {
+			handle_exception(fsg);
+			continue;
+		}
+
+		if (!fsg->running) {
+			sleep_thread(fsg);
+			continue;
+		}
+
+		if (get_next_command(fsg))
+			continue;
+
+		spin_lock_irq(&fsg->lock);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_DATA_PHASE;
+		spin_unlock_irq(&fsg->lock);
+
+		if (do_scsi_command(fsg) || finish_reply(fsg))
+			continue;
+
+		spin_lock_irq(&fsg->lock);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_STATUS_PHASE;
+		spin_unlock_irq(&fsg->lock);
+
+		if (send_status(fsg))
+			continue;
+
+		spin_lock_irq(&fsg->lock);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_IDLE;
+		spin_unlock_irq(&fsg->lock);
+	}
+
+	spin_lock_irq(&fsg->lock);
+	fsg->thread_task = NULL;
+	spin_unlock_irq(&fsg->lock);
+
+	/* If we are exiting because of a signal, unregister the
+	 * gadget driver. */
+	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
+		usb_gadget_unregister_driver(&fsg_driver);
+
+	/* Let the unbind and cleanup routines know the thread has exited */
+	complete_and_exit(&fsg->thread_notifier, 0);
+}
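+
+/* Note on the loop above: for each transfer the thread walks the command
+ * (get_next_command), data (do_scsi_command/finish_reply) and status
+ * (send_status) phases, returning to FSG_STATE_IDLE in between; any pending
+ * exception or signal diverts it into handle_exception(). */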
+
+
+/*-------------------------------------------------------------------------*/
+
+
+/* The write permissions and store_xxx pointers are set in fsg_bind() */
+static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL);
+static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, NULL);
+static DEVICE_ATTR(file, 0444, fsg_show_file, NULL);
+
+
+/*-------------------------------------------------------------------------*/
+
+static void fsg_release(struct kref *ref)
+{
+	struct fsg_dev	*fsg = container_of(ref, struct fsg_dev, ref);
+
+	kfree(fsg->luns);
+	kfree(fsg);
+}
+
+static void lun_release(struct device *dev)
+{
+	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
+	struct fsg_dev		*fsg =
+		container_of(filesem, struct fsg_dev, filesem);
+
+	kref_put(&fsg->ref, fsg_release);
+}
+
+static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
+{
+	struct fsg_dev		*fsg = get_gadget_data(gadget);
+	int			i;
+	struct fsg_lun		*curlun;
+	struct usb_request	*req = fsg->ep0req;
+
+	DBG(fsg, "unbind\n");
+	clear_bit(REGISTERED, &fsg->atomic_bitflags);
+
+	/* If the thread isn't already dead, tell it to exit now */
+	if (fsg->state != FSG_STATE_TERMINATED) {
+		raise_exception(fsg, FSG_STATE_EXIT);
+		wait_for_completion(&fsg->thread_notifier);
+
+		/* The cleanup routine waits for this completion also */
+		complete(&fsg->thread_notifier);
+	}
+
+	/* Unregister the sysfs attribute files and the LUNs */
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		if (curlun->registered) {
+			device_remove_file(&curlun->dev, &dev_attr_nofua);
+			device_remove_file(&curlun->dev, &dev_attr_ro);
+			device_remove_file(&curlun->dev, &dev_attr_file);
+			fsg_lun_close(curlun);
+			device_unregister(&curlun->dev);
+			curlun->registered = 0;
+		}
+	}
+
+	/* Free the data buffers */
+	for (i = 0; i < fsg_num_buffers; ++i)
+		kfree(fsg->buffhds[i].buf);
+
+	/* Free the request and buffer for endpoint 0 */
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(fsg->ep0, req);
+	}
+
+	set_gadget_data(gadget, NULL);
+}
+
+
+static int __init check_parameters(struct fsg_dev *fsg)
+{
+	int	prot;
+	int	gcnum;
+
+	/* Store the default values */
+	mod_data.transport_type = USB_PR_BULK;
+	mod_data.transport_name = "Bulk-only";
+	mod_data.protocol_type = USB_SC_SCSI;
+	mod_data.protocol_name = "Transparent SCSI";
+
+	/* Some peripheral controllers are known not to be able to
+	 * halt bulk endpoints correctly.  If one of them is present,
+	 * disable stalls.
+	 */
+	if (gadget_is_at91(fsg->gadget))
+		mod_data.can_stall = 0;
+
+	if (mod_data.release == 0xffff) {	// Parameter wasn't set
+		gcnum = usb_gadget_controller_number(fsg->gadget);
+		if (gcnum >= 0)
+			mod_data.release = 0x0300 + gcnum;
+		else {
+			WARNING(fsg, "controller '%s' not recognized\n",
+				fsg->gadget->name);
+			mod_data.release = 0x0399;
+		}
+	}
+
+	prot = simple_strtol(mod_data.protocol_parm, NULL, 0);
+
+#ifdef CONFIG_USB_FILE_STORAGE_TEST
+	if (strnicmp(mod_data.transport_parm, "BBB", 10) == 0) {
+		;		// Use default setting
+	} else if (strnicmp(mod_data.transport_parm, "CB", 10) == 0) {
+		mod_data.transport_type = USB_PR_CB;
+		mod_data.transport_name = "Control-Bulk";
+	} else if (strnicmp(mod_data.transport_parm, "CBI", 10) == 0) {
+		mod_data.transport_type = USB_PR_CBI;
+		mod_data.transport_name = "Control-Bulk-Interrupt";
+	} else {
+		ERROR(fsg, "invalid transport: %s\n", mod_data.transport_parm);
+		return -EINVAL;
+	}
+
+	if (strnicmp(mod_data.protocol_parm, "SCSI", 10) == 0 ||
+			prot == USB_SC_SCSI) {
+		;		// Use default setting
+	} else if (strnicmp(mod_data.protocol_parm, "RBC", 10) == 0 ||
+			prot == USB_SC_RBC) {
+		mod_data.protocol_type = USB_SC_RBC;
+		mod_data.protocol_name = "RBC";
+	} else if (strnicmp(mod_data.protocol_parm, "8020", 4) == 0 ||
+			strnicmp(mod_data.protocol_parm, "ATAPI", 10) == 0 ||
+			prot == USB_SC_8020) {
+		mod_data.protocol_type = USB_SC_8020;
+		mod_data.protocol_name = "8020i (ATAPI)";
+	} else if (strnicmp(mod_data.protocol_parm, "QIC", 3) == 0 ||
+			prot == USB_SC_QIC) {
+		mod_data.protocol_type = USB_SC_QIC;
+		mod_data.protocol_name = "QIC-157";
+	} else if (strnicmp(mod_data.protocol_parm, "UFI", 10) == 0 ||
+			prot == USB_SC_UFI) {
+		mod_data.protocol_type = USB_SC_UFI;
+		mod_data.protocol_name = "UFI";
+	} else if (strnicmp(mod_data.protocol_parm, "8070", 4) == 0 ||
+			prot == USB_SC_8070) {
+		mod_data.protocol_type = USB_SC_8070;
+		mod_data.protocol_name = "8070i";
+	} else {
+		ERROR(fsg, "invalid protocol: %s\n", mod_data.protocol_parm);
+		return -EINVAL;
+	}
+
+	mod_data.buflen &= PAGE_CACHE_MASK;
+	if (mod_data.buflen <= 0) {
+		ERROR(fsg, "invalid buflen\n");
+		return -ETOOSMALL;
+	}
+
+#endif /* CONFIG_USB_FILE_STORAGE_TEST */
+
+	/* Serial string handling.
+	 * On a real device, the serial string would be loaded
+	 * from permanent storage. */
+	if (mod_data.serial) {
+		const char *ch;
+		unsigned len = 0;
+
+		/* Sanity check:
+		 * The CB[I] specification limits the serial string to
+		 * 12 uppercase hexadecimal characters.
+		 * BBB needs at least 12 uppercase hexadecimal characters,
+		 * with a maximum of 126. */
+		for (ch = mod_data.serial; *ch; ++ch) {
+			++len;
+			if ((*ch < '0' || *ch > '9') &&
+			    (*ch < 'A' || *ch > 'F')) { /* not uppercase hex */
+				WARNING(fsg,
+					"Invalid serial string character: %c\n",
+					*ch);
+				goto no_serial;
+			}
+		}
+		if (len > 126 ||
+		    (mod_data.transport_type == USB_PR_BULK && len < 12) ||
+		    (mod_data.transport_type != USB_PR_BULK && len > 12)) {
+			WARNING(fsg, "Invalid serial string length!\n");
+			goto no_serial;
+		}
+		fsg_strings[FSG_STRING_SERIAL - 1].s = mod_data.serial;
+	} else {
+		WARNING(fsg, "No serial-number string provided!\n");
+ no_serial:
+		device_desc.iSerialNumber = 0;
+	}
+
+	return 0;
+}
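+
+#if 0
+/* Illustrative sketch only, not part of the driver: a standalone helper
+ * (hypothetical name serial_is_valid) that mirrors the serial-string check
+ * above: uppercase hexadecimal digits only, at most 126 characters overall,
+ * at least 12 for Bulk-only (BBB) and at most 12 for CB[I]. */
+static int serial_is_valid(const char *s, int is_bbb)
+{
+	unsigned int len = 0;
+
+	for (; *s; ++s, ++len) {
+		/* reject anything that is not an uppercase hex digit */
+		if ((*s < '0' || *s > '9') && (*s < 'A' || *s > 'F'))
+			return 0;
+	}
+	if (len > 126)
+		return 0;
+	return is_bbb ? (len >= 12) : (len <= 12);
+}
+#endif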
+
+
+static int __init fsg_bind(struct usb_gadget *gadget)
+{
+	struct fsg_dev		*fsg = the_fsg;
+	int			rc;
+	int			i;
+	struct fsg_lun		*curlun;
+	struct usb_ep		*ep;
+	struct usb_request	*req;
+	char			*pathbuf, *p;
+
+	fsg->gadget = gadget;
+	set_gadget_data(gadget, fsg);
+	fsg->ep0 = gadget->ep0;
+	fsg->ep0->driver_data = fsg;
+
+	if ((rc = check_parameters(fsg)) != 0)
+		goto out;
+
+	if (mod_data.removable) {	// Enable the store_xxx attributes
+		dev_attr_file.attr.mode = 0644;
+		dev_attr_file.store = fsg_store_file;
+		if (!mod_data.cdrom) {
+			dev_attr_ro.attr.mode = 0644;
+			dev_attr_ro.store = fsg_store_ro;
+		}
+	}
+
+	/* Only for removable media? */
+	dev_attr_nofua.attr.mode = 0644;
+	dev_attr_nofua.store = fsg_store_nofua;
+
+	/* Find out how many LUNs there should be */
+	i = mod_data.nluns;
+	if (i == 0)
+		i = max(mod_data.num_filenames, 1u);
+	if (i > FSG_MAX_LUNS) {
+		ERROR(fsg, "invalid number of LUNs: %d\n", i);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Create the LUNs, open their backing files, and register the
+	 * LUN devices in sysfs. */
+	fsg->luns = kzalloc(i * sizeof(struct fsg_lun), GFP_KERNEL);
+	if (!fsg->luns) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	fsg->nluns = i;
+
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		curlun->cdrom = !!mod_data.cdrom;
+		curlun->ro = mod_data.cdrom || mod_data.ro[i];
+		curlun->initially_ro = curlun->ro;
+		curlun->removable = mod_data.removable;
+		curlun->nofua = mod_data.nofua[i];
+		curlun->dev.release = lun_release;
+		curlun->dev.parent = &gadget->dev;
+		curlun->dev.driver = &fsg_driver.driver;
+		dev_set_drvdata(&curlun->dev, &fsg->filesem);
+		dev_set_name(&curlun->dev, "%s-lun%d",
+			     dev_name(&gadget->dev), i);
+
+		kref_get(&fsg->ref);
+		rc = device_register(&curlun->dev);
+		if (rc) {
+			INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
+			put_device(&curlun->dev);
+			goto out;
+		}
+		curlun->registered = 1;
+
+		rc = device_create_file(&curlun->dev, &dev_attr_ro);
+		if (rc)
+			goto out;
+		rc = device_create_file(&curlun->dev, &dev_attr_nofua);
+		if (rc)
+			goto out;
+		rc = device_create_file(&curlun->dev, &dev_attr_file);
+		if (rc)
+			goto out;
+
+		if (mod_data.file[i] && *mod_data.file[i]) {
+			rc = fsg_lun_open(curlun, mod_data.file[i]);
+			if (rc)
+				goto out;
+		} else if (!mod_data.removable) {
+			ERROR(fsg, "no file given for LUN%d\n", i);
+			rc = -EINVAL;
+			goto out;
+		}
+	}
+
+	/* Find all the endpoints we will use */
+	usb_ep_autoconfig_reset(gadget);
+	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
+	if (!ep)
+		goto autoconf_fail;
+	ep->driver_data = fsg;		// claim the endpoint
+	fsg->bulk_in = ep;
+
+	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
+	if (!ep)
+		goto autoconf_fail;
+	ep->driver_data = fsg;		// claim the endpoint
+	fsg->bulk_out = ep;
+
+	if (transport_is_cbi()) {
+		ep = usb_ep_autoconfig(gadget, &fsg_fs_intr_in_desc);
+		if (!ep)
+			goto autoconf_fail;
+		ep->driver_data = fsg;		// claim the endpoint
+		fsg->intr_in = ep;
+	}
+
+	/* Fix up the descriptors */
+	device_desc.idVendor = cpu_to_le16(mod_data.vendor);
+	device_desc.idProduct = cpu_to_le16(mod_data.product);
+	device_desc.bcdDevice = cpu_to_le16(mod_data.release);
+
+	i = (transport_is_cbi() ? 3 : 2);	// Number of endpoints
+	fsg_intf_desc.bNumEndpoints = i;
+	fsg_intf_desc.bInterfaceSubClass = mod_data.protocol_type;
+	fsg_intf_desc.bInterfaceProtocol = mod_data.transport_type;
+	fsg_fs_function[i + FSG_FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
+
+	if (gadget_is_dualspeed(gadget)) {
+		fsg_hs_function[i + FSG_HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
+
+		/* Assume endpoint addresses are the same for both speeds */
+		fsg_hs_bulk_in_desc.bEndpointAddress =
+			fsg_fs_bulk_in_desc.bEndpointAddress;
+		fsg_hs_bulk_out_desc.bEndpointAddress =
+			fsg_fs_bulk_out_desc.bEndpointAddress;
+		fsg_hs_intr_in_desc.bEndpointAddress =
+			fsg_fs_intr_in_desc.bEndpointAddress;
+	}
+
+	if (gadget_is_superspeed(gadget)) {
+		unsigned		max_burst;
+
+		fsg_ss_function[i + FSG_SS_FUNCTION_PRE_EP_ENTRIES] = NULL;
+
+		/* Calculate bMaxBurst, we know packet size is 1024 */
+		max_burst = min_t(unsigned, mod_data.buflen / 1024, 15);
+
+		/* Assume endpoint addresses are the same for both speeds */
+		fsg_ss_bulk_in_desc.bEndpointAddress =
+			fsg_fs_bulk_in_desc.bEndpointAddress;
+		fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;
+
+		fsg_ss_bulk_out_desc.bEndpointAddress =
+			fsg_fs_bulk_out_desc.bEndpointAddress;
+		fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
+	}
+
+	if (gadget_is_otg(gadget))
+		fsg_otg_desc.bmAttributes |= USB_OTG_HNP;
+
+	rc = -ENOMEM;
+
+	/* Allocate the request and buffer for endpoint 0 */
+	fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL);
+	if (!req)
+		goto out;
+	req->buf = kmalloc(EP0_BUFSIZE, GFP_KERNEL);
+	if (!req->buf)
+		goto out;
+	req->complete = ep0_complete;
+
+	/* Allocate the data buffers */
+	for (i = 0; i < fsg_num_buffers; ++i) {
+		struct fsg_buffhd	*bh = &fsg->buffhds[i];
+
+		/* Allocate for the bulk-in endpoint.  We assume that
+		 * the buffer will also work with the bulk-out (and
+		 * interrupt-in) endpoint. */
+		bh->buf = kmalloc(mod_data.buflen, GFP_KERNEL);
+		if (!bh->buf)
+			goto out;
+		bh->next = bh + 1;
+	}
+	fsg->buffhds[fsg_num_buffers - 1].next = &fsg->buffhds[0];
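+	/* The buffer heads now form a circular ring; the I/O thread walks it
+	 * through next_buffhd_to_fill and next_buffhd_to_drain. */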
+
+	/* This should reflect the actual gadget power source */
+	usb_gadget_set_selfpowered(gadget);
+
+	snprintf(fsg_string_manufacturer, sizeof fsg_string_manufacturer,
+			"%s %s with %s",
+			init_utsname()->sysname, init_utsname()->release,
+			gadget->name);
+
+	fsg->thread_task = kthread_create(fsg_main_thread, fsg,
+			"file-storage-gadget");
+	if (IS_ERR(fsg->thread_task)) {
+		rc = PTR_ERR(fsg->thread_task);
+		goto out;
+	}
+
+	INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
+	INFO(fsg, "NOTE: This driver is deprecated.  "
+			"Consider using g_mass_storage instead.\n");
+	INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);
+
+	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		if (fsg_lun_is_open(curlun)) {
+			p = NULL;
+			if (pathbuf) {
+				p = d_path(&curlun->filp->f_path,
+					   pathbuf, PATH_MAX);
+				if (IS_ERR(p))
+					p = NULL;
+			}
+			LINFO(curlun, "ro=%d, nofua=%d, file: %s\n",
+			      curlun->ro, curlun->nofua, (p ? p : "(error)"));
+		}
+	}
+	kfree(pathbuf);
+
+	DBG(fsg, "transport=%s (x%02x)\n",
+			mod_data.transport_name, mod_data.transport_type);
+	DBG(fsg, "protocol=%s (x%02x)\n",
+			mod_data.protocol_name, mod_data.protocol_type);
+	DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
+			mod_data.vendor, mod_data.product, mod_data.release);
+	DBG(fsg, "removable=%d, stall=%d, cdrom=%d, buflen=%u\n",
+			mod_data.removable, mod_data.can_stall,
+			mod_data.cdrom, mod_data.buflen);
+	DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task));
+
+	set_bit(REGISTERED, &fsg->atomic_bitflags);
+
+	/* Tell the thread to start working */
+	wake_up_process(fsg->thread_task);
+	return 0;
+
+autoconf_fail:
+	ERROR(fsg, "unable to autoconfigure all endpoints\n");
+	rc = -ENOTSUPP;
+
+out:
+	fsg->state = FSG_STATE_TERMINATED;	// The thread is dead
+	fsg_unbind(gadget);
+	complete(&fsg->thread_notifier);
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void fsg_suspend(struct usb_gadget *gadget)
+{
+	struct fsg_dev		*fsg = get_gadget_data(gadget);
+
+	DBG(fsg, "suspend\n");
+	set_bit(SUSPENDED, &fsg->atomic_bitflags);
+}
+
+static void fsg_resume(struct usb_gadget *gadget)
+{
+	struct fsg_dev		*fsg = get_gadget_data(gadget);
+
+	DBG(fsg, "resume\n");
+	clear_bit(SUSPENDED, &fsg->atomic_bitflags);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_gadget_driver		fsg_driver = {
+	.max_speed	= USB_SPEED_SUPER,
+	.function	= (char *) fsg_string_product,
+	.unbind		= fsg_unbind,
+	.disconnect	= fsg_disconnect,
+	.setup		= fsg_setup,
+	.suspend	= fsg_suspend,
+	.resume		= fsg_resume,
+
+	.driver		= {
+		.name		= DRIVER_NAME,
+		.owner		= THIS_MODULE,
+		// .release = ...
+		// .suspend = ...
+		// .resume = ...
+	},
+};
+
+
+static int __init fsg_alloc(void)
+{
+	struct fsg_dev		*fsg;
+
+	fsg = kzalloc(sizeof *fsg +
+		      fsg_num_buffers * sizeof *(fsg->buffhds), GFP_KERNEL);
+
+	if (!fsg)
+		return -ENOMEM;
+	spin_lock_init(&fsg->lock);
+	init_rwsem(&fsg->filesem);
+	kref_init(&fsg->ref);
+	init_completion(&fsg->thread_notifier);
+
+	the_fsg = fsg;
+	return 0;
+}
+
+
+static int __init fsg_init(void)
+{
+	int		rc;
+	struct fsg_dev	*fsg;
+
+	rc = fsg_num_buffers_validate();
+	if (rc != 0)
+		return rc;
+
+	if ((rc = fsg_alloc()) != 0)
+		return rc;
+	fsg = the_fsg;
+	if ((rc = usb_gadget_probe_driver(&fsg_driver, fsg_bind)) != 0)
+		kref_put(&fsg->ref, fsg_release);
+	return rc;
+}
+module_init(fsg_init);
+
+
+static void __exit fsg_cleanup(void)
+{
+	struct fsg_dev	*fsg = the_fsg;
+
+	/* Unregister the driver iff the thread hasn't already done so */
+	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
+		usb_gadget_unregister_driver(&fsg_driver);
+
+	/* Wait for the thread to finish up */
+	wait_for_completion(&fsg->thread_notifier);
+
+	kref_put(&fsg->ref, fsg_release);
+}
+module_exit(fsg_cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_mxc_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_mxc_udc.c
new file mode 100644
index 0000000..dcbc0a2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_mxc_udc.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2009
+ * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
+ *
+ * Description:
+ * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
+ * driver to function correctly on these systems.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fsl_devices.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+
+static struct clk *mxc_ahb_clk;
+static struct clk *mxc_usb_clk;
+
+/* workaround ENGcm09152 for i.MX35 */
+#define USBPHYCTRL_OTGBASE_OFFSET	0x608
+#define USBPHYCTRL_EVDO			(1 << 23)
+
+int fsl_udc_clk_init(struct platform_device *pdev)
+{
+	struct fsl_usb2_platform_data *pdata;
+	unsigned long freq;
+	int ret;
+
+	pdata = pdev->dev.platform_data;
+
+	if (!cpu_is_mx35() && !cpu_is_mx25()) {
+		mxc_ahb_clk = clk_get(&pdev->dev, "usb_ahb");
+		if (IS_ERR(mxc_ahb_clk))
+			return PTR_ERR(mxc_ahb_clk);
+
+		ret = clk_enable(mxc_ahb_clk);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "clk_enable(\"usb_ahb\") failed\n");
+			goto eenahb;
+		}
+	}
+
+	/* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
+	mxc_usb_clk = clk_get(&pdev->dev, "usb");
+	if (IS_ERR(mxc_usb_clk)) {
+		dev_err(&pdev->dev, "clk_get(\"usb\") failed\n");
+		ret = PTR_ERR(mxc_usb_clk);
+		goto egusb;
+	}
+
+	if (!cpu_is_mx51()) {
+		freq = clk_get_rate(mxc_usb_clk);
+		if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
+		    (freq < 59999000 || freq > 60001000)) {
+			dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
+			ret = -EINVAL;
+			goto eclkrate;
+		}
+	}
+
+	ret = clk_enable(mxc_usb_clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "clk_enable(\"usb_clk\") failed\n");
+		goto eenusb;
+	}
+
+	return 0;
+
+eenusb:
+eclkrate:
+	clk_put(mxc_usb_clk);
+	mxc_usb_clk = NULL;
+egusb:
+	if (!cpu_is_mx35())
+		clk_disable(mxc_ahb_clk);
+eenahb:
+	if (!cpu_is_mx35())
+		clk_put(mxc_ahb_clk);
+	return ret;
+}
+
+void fsl_udc_clk_finalize(struct platform_device *pdev)
+{
+	struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+	if (cpu_is_mx35()) {
+		unsigned int v;
+
+		/* workaround ENGcm09152 for i.MX35 */
+		if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
+			v = readl(MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
+					USBPHYCTRL_OTGBASE_OFFSET));
+			writel(v | USBPHYCTRL_EVDO,
+				MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
+					USBPHYCTRL_OTGBASE_OFFSET));
+		}
+	}
+
+	/* ULPI transceivers don't need usbpll */
+	if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
+		clk_disable(mxc_usb_clk);
+		clk_put(mxc_usb_clk);
+		mxc_usb_clk = NULL;
+	}
+}
+
+void fsl_udc_clk_release(void)
+{
+	if (mxc_usb_clk) {
+		clk_disable(mxc_usb_clk);
+		clk_put(mxc_usb_clk);
+	}
+	if (!cpu_is_mx35()) {
+		clk_disable(mxc_ahb_clk);
+		clk_put(mxc_ahb_clk);
+	}
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_qe_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_qe_udc.c
new file mode 100644
index 0000000..877a2c4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_qe_udc.c
@@ -0,0 +1,2823 @@
+/*
+ * driver/usb/gadget/fsl_qe_udc.c
+ *
+ * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * 	Xie Xiaobo <X.Xie@freescale.com>
+ * 	Li Yang <leoli@freescale.com>
+ * 	Based on bareboard code from Shlomi Gridish.
+ *
+ * Description:
+ * Freescale QE/CPM USB Peripheral Controller Driver
+ * The controller can be found on the MPC8360, MPC8272, etc.
+ * MPC8360 Rev 1.1 may need a QE microcode update
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#undef USB_TRACE
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/moduleparam.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <asm/qe.h>
+#include <asm/cpm.h>
+#include <asm/dma.h>
+#include <asm/reg.h>
+#include "fsl_qe_udc.h"
+
+#define DRIVER_DESC     "Freescale QE/CPM USB Device Controller driver"
+#define DRIVER_AUTHOR   "Xie XiaoBo"
+#define DRIVER_VERSION  "1.0"
+
+#define DMA_ADDR_INVALID        (~(dma_addr_t)0)
+
+static const char driver_name[] = "fsl_qe_udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+/* ep name is important in gadget; it should obey the convention of ep_match() */
+static const char *const ep_name[] = {
+	"ep0-control", /* everyone has ep0 */
+	/* 3 configurable endpoints */
+	"ep1",
+	"ep2",
+	"ep3",
+};
+
+static struct usb_endpoint_descriptor qe_ep0_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	0,
+	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize =	USB_MAX_CTRL_PAYLOAD,
+};
+
+/* it is initialized in probe()  */
+static struct qe_udc *udc_controller;
+
+/********************************************************************
+ *      Internally Used Functions Start
+********************************************************************/
+/*-----------------------------------------------------------------
+ * done() - retire a request; caller blocked irqs
+ *--------------------------------------------------------------*/
+static void done(struct qe_ep *ep, struct qe_req *req, int status)
+{
+	struct qe_udc *udc = ep->udc;
+	unsigned char stopped = ep->stopped;
+
+	/* The req->queue pointer is used by ep_queue(), which adds the
+	 * request to the tail of the endpoint's queue; here the request
+	 * is removed from ep->queue.
+	 */
+	list_del_init(&req->queue);
+
+	/* req.status should be set as -EINPROGRESS in ep_queue() */
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	if (req->mapped) {
+		dma_unmap_single(udc->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			ep_is_in(ep)
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+	} else
+		dma_sync_single_for_cpu(udc->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			ep_is_in(ep)
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+
+	if (status && (status != -ESHUTDOWN))
+		dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+	spin_unlock(&udc->lock);
+
+	/* this complete() should be a function implemented by the gadget
+	 * layer, e.g. fsg's bulk_in_complete() */
+	if (req->req.complete)
+		req->req.complete(&ep->ep, &req->req);
+
+	spin_lock(&udc->lock);
+
+	ep->stopped = stopped;
+}
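+
+/* Note: done() drops udc->lock around the completion callback because the
+ * gadget driver may queue a new request from inside complete(); ep->stopped
+ * is raised meanwhile so the queue heads are not modified concurrently. */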
+
+/*-----------------------------------------------------------------
+ * nuke(): delete all requests related to this ep
+ *--------------------------------------------------------------*/
+static void nuke(struct qe_ep *ep, int status)
+{
+	/* While this ep still has requests linked, retire them */
+	while (!list_empty(&ep->queue)) {
+		struct qe_req *req = NULL;
+		req = list_entry(ep->queue.next, struct qe_req, queue);
+
+		done(ep, req, status);
+	}
+}
+
+/*---------------------------------------------------------------------------*
+ * USB and Endpoint manipulation, including parameters and registers         *
+ *---------------------------------------------------------------------------*/
+/* @value: 1 -- set stall, 0 -- clear stall */
+static int qe_eprx_stall_change(struct qe_ep *ep, int value)
+{
+	u16 tem_usep;
+	u8 epnum = ep->epnum;
+	struct qe_udc *udc = ep->udc;
+
+	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
+	tem_usep = tem_usep & ~USB_RHS_MASK;
+	if (value == 1)
+		tem_usep |= USB_RHS_STALL;
+	else if (ep->dir == USB_DIR_IN)
+		tem_usep |= USB_RHS_IGNORE_OUT;
+
+	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
+	return 0;
+}
+
+static int qe_eptx_stall_change(struct qe_ep *ep, int value)
+{
+	u16 tem_usep;
+	u8 epnum = ep->epnum;
+	struct qe_udc *udc = ep->udc;
+
+	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
+	tem_usep = tem_usep & ~USB_THS_MASK;
+	if (value == 1)
+		tem_usep |= USB_THS_STALL;
+	else if (ep->dir == USB_DIR_OUT)
+		tem_usep |= USB_THS_IGNORE_IN;
+
+	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
+
+	return 0;
+}
+
+static int qe_ep0_stall(struct qe_udc *udc)
+{
+	qe_eptx_stall_change(&udc->eps[0], 1);
+	qe_eprx_stall_change(&udc->eps[0], 1);
+	udc_controller->ep0_state = WAIT_FOR_SETUP;
+	udc_controller->ep0_dir = 0;
+	return 0;
+}
+
+static int qe_eprx_nack(struct qe_ep *ep)
+{
+	u8 epnum = ep->epnum;
+	struct qe_udc *udc = ep->udc;
+
+	if (ep->state == EP_STATE_IDLE) {
+		/* Set the ep's nack */
+		clrsetbits_be16(&udc->usb_regs->usb_usep[epnum],
+				USB_RHS_MASK, USB_RHS_NACK);
+
+		/* Mask Rx and Busy interrupts */
+		clrbits16(&udc->usb_regs->usb_usbmr,
+				(USB_E_RXB_MASK | USB_E_BSY_MASK));
+
+		ep->state = EP_STATE_NACK;
+	}
+	return 0;
+}
+
+static int qe_eprx_normal(struct qe_ep *ep)
+{
+	struct qe_udc *udc = ep->udc;
+
+	if (ep->state == EP_STATE_NACK) {
+		clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum],
+				USB_RTHS_MASK, USB_THS_IGNORE_IN);
+
+		/* Unmask RX interrupts */
+		out_be16(&udc->usb_regs->usb_usber,
+				USB_E_BSY_MASK | USB_E_RXB_MASK);
+		setbits16(&udc->usb_regs->usb_usbmr,
+				(USB_E_RXB_MASK | USB_E_BSY_MASK));
+
+		ep->state = EP_STATE_IDLE;
+		ep->has_data = 0;
+	}
+
+	return 0;
+}
+
+static int qe_ep_cmd_stoptx(struct qe_ep *ep)
+{
+	if (ep->udc->soc_type == PORT_CPM)
+		cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT),
+				CPM_USB_STOP_TX_OPCODE);
+	else
+		qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB,
+				ep->epnum, 0);
+
+	return 0;
+}
+
+static int qe_ep_cmd_restarttx(struct qe_ep *ep)
+{
+	if (ep->udc->soc_type == PORT_CPM)
+		cpm_command(CPM_USB_RESTART_TX | (ep->epnum <<
+				CPM_USB_EP_SHIFT), CPM_USB_RESTART_TX_OPCODE);
+	else
+		qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB,
+				ep->epnum, 0);
+
+	return 0;
+}
+
+static int qe_ep_flushtxfifo(struct qe_ep *ep)
+{
+	struct qe_udc *udc = ep->udc;
+	int i;
+
+	i = (int)ep->epnum;
+
+	qe_ep_cmd_stoptx(ep);
+	out_8(&udc->usb_regs->usb_uscom,
+		USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
+	out_be16(&udc->ep_param[i]->tbptr, in_be16(&udc->ep_param[i]->tbase));
+	out_be32(&udc->ep_param[i]->tstate, 0);
+	out_be16(&udc->ep_param[i]->tbcnt, 0);
+
+	ep->c_txbd = ep->txbase;
+	ep->n_txbd = ep->txbase;
+	qe_ep_cmd_restarttx(ep);
+	return 0;
+}
+
+static int qe_ep_filltxfifo(struct qe_ep *ep)
+{
+	struct qe_udc *udc = ep->udc;
+
+	out_8(&udc->usb_regs->usb_uscom,
+			USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
+	return 0;
+}
+
+static int qe_epbds_reset(struct qe_udc *udc, int pipe_num)
+{
+	struct qe_ep *ep;
+	u32 bdring_len;
+	struct qe_bd __iomem *bd;
+	int i;
+
+	ep = &udc->eps[pipe_num];
+
+	if (ep->dir == USB_DIR_OUT)
+		bdring_len = USB_BDRING_LEN_RX;
+	else
+		bdring_len = USB_BDRING_LEN;
+
+	bd = ep->rxbase;
+	for (i = 0; i < (bdring_len - 1); i++) {
+		out_be32((u32 __iomem *)bd, R_E | R_I);
+		bd++;
+	}
+	out_be32((u32 __iomem *)bd, R_E | R_I | R_W);
+
+	bd = ep->txbase;
+	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
+		out_be32(&bd->buf, 0);
+		out_be32((u32 __iomem *)bd, 0);
+		bd++;
+	}
+	out_be32((u32 __iomem *)bd, T_W);
+
+	return 0;
+}
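+
+#if 0
+/* Illustrative sketch only, not part of the driver: how a BD ring is
+ * advanced.  The last descriptor carries the wrap bit (R_W or T_W), so the
+ * walker returns to the ring base after it; recycle_one_rxbd() and the
+ * tx-confirm loops below follow the same pattern. */
+static struct qe_bd __iomem *next_bd(struct qe_bd __iomem *bd,
+				     struct qe_bd __iomem *base,
+				     u32 bdstatus, u32 wrap_bit)
+{
+	return (bdstatus & wrap_bit) ? base : bd + 1;
+}
+#endif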
+
+static int qe_ep_reset(struct qe_udc *udc, int pipe_num)
+{
+	struct qe_ep *ep;
+	u16 tmpusep;
+
+	ep = &udc->eps[pipe_num];
+	tmpusep = in_be16(&udc->usb_regs->usb_usep[pipe_num]);
+	tmpusep &= ~USB_RTHS_MASK;
+
+	switch (ep->dir) {
+	case USB_DIR_BOTH:
+		qe_ep_flushtxfifo(ep);
+		break;
+	case USB_DIR_OUT:
+		tmpusep |= USB_THS_IGNORE_IN;
+		break;
+	case USB_DIR_IN:
+		qe_ep_flushtxfifo(ep);
+		tmpusep |= USB_RHS_IGNORE_OUT;
+		break;
+	default:
+		break;
+	}
+	out_be16(&udc->usb_regs->usb_usep[pipe_num], tmpusep);
+
+	qe_epbds_reset(udc, pipe_num);
+
+	return 0;
+}
+
+static int qe_ep_toggledata01(struct qe_ep *ep)
+{
+	ep->data01 ^= 0x1;
+	return 0;
+}
+
+static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num)
+{
+	struct qe_ep *ep = &udc->eps[pipe_num];
+	unsigned long tmp_addr = 0;
+	struct usb_ep_para __iomem *epparam;
+	int i;
+	struct qe_bd __iomem *bd;
+	int bdring_len;
+
+	if (ep->dir == USB_DIR_OUT)
+		bdring_len = USB_BDRING_LEN_RX;
+	else
+		bdring_len = USB_BDRING_LEN;
+
+	epparam = udc->ep_param[pipe_num];
+	/* alloc multi-ram for BD rings and set the ep parameters */
+	tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
+				USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
+	if (IS_ERR_VALUE(tmp_addr))
+		return -ENOMEM;
+
+	out_be16(&epparam->rbase, (u16)tmp_addr);
+	out_be16(&epparam->tbase, (u16)(tmp_addr +
+				(sizeof(struct qe_bd) * bdring_len)));
+
+	out_be16(&epparam->rbptr, in_be16(&epparam->rbase));
+	out_be16(&epparam->tbptr, in_be16(&epparam->tbase));
+
+	ep->rxbase = cpm_muram_addr(tmp_addr);
+	ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd)
+				* bdring_len));
+	ep->n_rxbd = ep->rxbase;
+	ep->e_rxbd = ep->rxbase;
+	ep->n_txbd = ep->txbase;
+	ep->c_txbd = ep->txbase;
+	ep->data01 = 0; /* data0 */
+
+	/* Init TX and RX bds */
+	bd = ep->rxbase;
+	for (i = 0; i < bdring_len - 1; i++) {
+		out_be32(&bd->buf, 0);
+		out_be32((u32 __iomem *)bd, 0);
+		bd++;
+	}
+	out_be32(&bd->buf, 0);
+	out_be32((u32 __iomem *)bd, R_W);
+
+	bd = ep->txbase;
+	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
+		out_be32(&bd->buf, 0);
+		out_be32((u32 __iomem *)bd, 0);
+		bd++;
+	}
+	out_be32(&bd->buf, 0);
+	out_be32((u32 __iomem *)bd, T_W);
+
+	return 0;
+}
+
+static int qe_ep_rxbd_update(struct qe_ep *ep)
+{
+	unsigned int size;
+	int i;
+	unsigned int tmp;
+	struct qe_bd __iomem *bd;
+	unsigned int bdring_len;
+
+	if (ep->rxbase == NULL)
+		return -EINVAL;
+
+	bd = ep->rxbase;
+
+	ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC);
+	if (ep->rxframe == NULL) {
+		dev_err(ep->udc->dev, "malloc rxframe failed\n");
+		return -ENOMEM;
+	}
+
+	qe_frame_init(ep->rxframe);
+
+	if (ep->dir == USB_DIR_OUT)
+		bdring_len = USB_BDRING_LEN_RX;
+	else
+		bdring_len = USB_BDRING_LEN;
+
+	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1);
+	ep->rxbuffer = kzalloc(size, GFP_ATOMIC);
+	if (ep->rxbuffer == NULL) {
+		dev_err(ep->udc->dev, "malloc rxbuffer failed,size=%d\n",
+				size);
+		kfree(ep->rxframe);
+		return -ENOMEM;
+	}
+
+	ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer);
+	if (ep->rxbuf_d == DMA_ADDR_INVALID) {
+		ep->rxbuf_d = dma_map_single(udc_controller->gadget.dev.parent,
+					ep->rxbuffer,
+					size,
+					DMA_FROM_DEVICE);
+		ep->rxbufmap = 1;
+	} else {
+		dma_sync_single_for_device(udc_controller->gadget.dev.parent,
+					ep->rxbuf_d, size,
+					DMA_FROM_DEVICE);
+		ep->rxbufmap = 0;
+	}
+
+	size = ep->ep.maxpacket + USB_CRC_SIZE + 2;
+	tmp = ep->rxbuf_d;
+	tmp = (u32)(((tmp >> 2) << 2) + 4);
+
+	for (i = 0; i < bdring_len - 1; i++) {
+		out_be32(&bd->buf, tmp);
+		out_be32((u32 __iomem *)bd, (R_E | R_I));
+		tmp = tmp + size;
+		bd++;
+	}
+	out_be32(&bd->buf, tmp);
+	out_be32((u32 __iomem *)bd, (R_E | R_I | R_W));
+
+	return 0;
+}
+
+static int qe_ep_register_init(struct qe_udc *udc, unsigned char pipe_num)
+{
+	struct qe_ep *ep = &udc->eps[pipe_num];
+	struct usb_ep_para __iomem *epparam;
+	u16 usep, logepnum;
+	u16 tmp;
+	u8 rtfcr = 0;
+
+	epparam = udc->ep_param[pipe_num];
+
+	usep = 0;
+	logepnum = (ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+	usep |= (logepnum << USB_EPNUM_SHIFT);
+
+	switch (ep->desc->bmAttributes & 0x03) {
+	case USB_ENDPOINT_XFER_BULK:
+		usep |= USB_TRANS_BULK;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		usep |=  USB_TRANS_ISO;
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		usep |= USB_TRANS_INT;
+		break;
+	default:
+		usep |= USB_TRANS_CTR;
+		break;
+	}
+
+	switch (ep->dir) {
+	case USB_DIR_OUT:
+		usep |= USB_THS_IGNORE_IN;
+		break;
+	case USB_DIR_IN:
+		usep |= USB_RHS_IGNORE_OUT;
+		break;
+	default:
+		break;
+	}
+	out_be16(&udc->usb_regs->usb_usep[pipe_num], usep);
+
+	rtfcr = 0x30;
+	out_8(&epparam->rbmr, rtfcr);
+	out_8(&epparam->tbmr, rtfcr);
+
+	tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE);
+	/* MRBLR must be divisible by 4 */
+	tmp = (u16)(((tmp >> 2) << 2) + 4);
+	out_be16(&epparam->mrblr, tmp);
+
+	return 0;
+}
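+
+/* Example of the MRBLR rounding above, assuming USB_CRC_SIZE is 2: a
+ * 64-byte endpoint gives 64 + 2 = 66, and ((66 >> 2) << 2) + 4 = 68,
+ * the next multiple of 4. */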
+
+static int qe_ep_init(struct qe_udc *udc,
+		      unsigned char pipe_num,
+		      const struct usb_endpoint_descriptor *desc)
+{
+	struct qe_ep *ep = &udc->eps[pipe_num];
+	unsigned long flags;
+	int reval = 0;
+	u16 max = 0;
+
+	max = usb_endpoint_maxp(desc);
+
+	/* Check that the max packet size is valid for this endpoint.
+	 * Refer to USB 2.0 spec, table 9-13.
+	 */
+	if (pipe_num != 0) {
+		switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+		case USB_ENDPOINT_XFER_BULK:
+			if (strstr(ep->ep.name, "-iso")
+					|| strstr(ep->ep.name, "-int"))
+				goto en_done;
+			switch (udc->gadget.speed) {
+			case USB_SPEED_HIGH:
+			if ((max == 128) || (max == 256) || (max == 512))
+				break;
+			default:
+				switch (max) {
+				case 4:
+				case 8:
+				case 16:
+				case 32:
+				case 64:
+					break;
+				default:
+				case USB_SPEED_LOW:
+					goto en_done;
+				}
+			}
+			break;
+		case USB_ENDPOINT_XFER_INT:
+			if (strstr(ep->ep.name, "-iso"))	/* bulk is ok */
+				goto en_done;
+			switch (udc->gadget.speed) {
+			case USB_SPEED_HIGH:
+				if (max <= 1024)
+					break;
+			case USB_SPEED_FULL:
+				if (max <= 64)
+					break;
+			default:
+				if (max <= 8)
+					break;
+				goto en_done;
+			}
+			break;
+		case USB_ENDPOINT_XFER_ISOC:
+			if (strstr(ep->ep.name, "-bulk")
+				|| strstr(ep->ep.name, "-int"))
+				goto en_done;
+			switch (udc->gadget.speed) {
+			case USB_SPEED_HIGH:
+				if (max <= 1024)
+					break;
+			case USB_SPEED_FULL:
+				if (max <= 1023)
+					break;
+			default:
+				goto en_done;
+			}
+			break;
+		case USB_ENDPOINT_XFER_CONTROL:
+			if (strstr(ep->ep.name, "-iso")
+				|| strstr(ep->ep.name, "-int"))
+				goto en_done;
+			switch (udc->gadget.speed) {
+			case USB_SPEED_HIGH:
+			case USB_SPEED_FULL:
+				switch (max) {
+				case 1:
+				case 2:
+				case 4:
+				case 8:
+				case 16:
+				case 32:
+				case 64:
+					break;
+				default:
+					goto en_done;
+				}
+			case USB_SPEED_LOW:
+				switch (max) {
+				case 1:
+				case 2:
+				case 4:
+				case 8:
+					break;
+				default:
+					goto en_done;
+				}
+			default:
+				goto en_done;
+			}
+			break;
+
+		default:
+			goto en_done;
+		}
+	} /* if ep0*/
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* initialize ep structure */
+	ep->ep.maxpacket = max;
+	ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
+	ep->desc = desc;
+	ep->stopped = 0;
+	ep->init = 1;
+
+	if (pipe_num == 0) {
+		ep->dir = USB_DIR_BOTH;
+		udc->ep0_dir = USB_DIR_OUT;
+		udc->ep0_state = WAIT_FOR_SETUP;
+	} else	{
+		switch (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) {
+		case USB_DIR_OUT:
+			ep->dir = USB_DIR_OUT;
+			break;
+		case USB_DIR_IN:
+			ep->dir = USB_DIR_IN;
+		default:
+			break;
+		}
+	}
+
+	/* hardware special operation */
+	qe_ep_bd_init(udc, pipe_num);
+	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) {
+		reval = qe_ep_rxbd_update(ep);
+		if (reval)
+			goto en_done1;
+	}
+
+	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) {
+		ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC);
+		if (ep->txframe == NULL) {
+			dev_err(udc->dev, "malloc txframe failed\n");
+			goto en_done2;
+		}
+		qe_frame_init(ep->txframe);
+	}
+
+	qe_ep_register_init(udc, pipe_num);
+
+	/* Now HW will be NAKing transfers to that EP,
+	 * until a buffer is queued to it. */
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+en_done2:
+	kfree(ep->rxbuffer);
+	kfree(ep->rxframe);
+en_done1:
+	spin_unlock_irqrestore(&udc->lock, flags);
+en_done:
+	dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name);
+	return -ENODEV;
+}
+
+static inline void qe_usb_enable(void)
+{
+	setbits8(&udc_controller->usb_regs->usb_usmod, USB_MODE_EN);
+}
+
+static inline void qe_usb_disable(void)
+{
+	clrbits8(&udc_controller->usb_regs->usb_usmod, USB_MODE_EN);
+}
+
+/*----------------------------------------------------------------------------*
+ *		USB and EP basic manipulation functions end		      *
+ *----------------------------------------------------------------------------*/
+
+
+/******************************************************************************
+		UDC transmit and receive process
+ ******************************************************************************/
+static void recycle_one_rxbd(struct qe_ep *ep)
+{
+	u32 bdstatus;
+
+	bdstatus = in_be32((u32 __iomem *)ep->e_rxbd);
+	bdstatus = R_I | R_E | (bdstatus & R_W);
+	out_be32((u32 __iomem *)ep->e_rxbd, bdstatus);
+
+	if (bdstatus & R_W)
+		ep->e_rxbd = ep->rxbase;
+	else
+		ep->e_rxbd++;
+}
+
+static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext)
+{
+	u32 bdstatus;
+	struct qe_bd __iomem *bd, *nextbd;
+	unsigned char stop = 0;
+
+	nextbd = ep->n_rxbd;
+	bd = ep->e_rxbd;
+	bdstatus = in_be32((u32 __iomem *)bd);
+
+	while (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK) && !stop) {
+		bdstatus = R_E | R_I | (bdstatus & R_W);
+		out_be32((u32 __iomem *)bd, bdstatus);
+
+		if (bdstatus & R_W)
+			bd = ep->rxbase;
+		else
+			bd++;
+
+		bdstatus = in_be32((u32 __iomem *)bd);
+		if (stopatnext && (bd == nextbd))
+			stop = 1;
+	}
+
+	ep->e_rxbd = bd;
+}
+
+static void ep_recycle_rxbds(struct qe_ep *ep)
+{
+	struct qe_bd __iomem *bd = ep->n_rxbd;
+	u32 bdstatus;
+	u8 epnum = ep->epnum;
+	struct qe_udc *udc = ep->udc;
+
+	bdstatus = in_be32((u32 __iomem *)bd);
+	if (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK)) {
+		bd = ep->rxbase +
+				((in_be16(&udc->ep_param[epnum]->rbptr) -
+				  in_be16(&udc->ep_param[epnum]->rbase))
+				 >> 3);
+		bdstatus = in_be32((u32 __iomem *)bd);
+
+		if (bdstatus & R_W)
+			bd = ep->rxbase;
+		else
+			bd++;
+
+		ep->e_rxbd = bd;
+		recycle_rxbds(ep, 0);
+		ep->e_rxbd = ep->n_rxbd;
+	} else
+		recycle_rxbds(ep, 1);
+
+	if (in_be16(&udc->usb_regs->usb_usber) & USB_E_BSY_MASK)
+		out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK);
+
+	if (ep->has_data <= 0 && (!list_empty(&ep->queue)))
+		qe_eprx_normal(ep);
+
+	ep->localnack = 0;
+}
+
+static void setup_received_handle(struct qe_udc *udc,
+					struct usb_ctrlrequest *setup);
+static int qe_ep_rxframe_handle(struct qe_ep *ep);
+static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
+/* when the BD PID is SETUP, handle the packet */
+static int ep0_setup_handle(struct qe_udc *udc)
+{
+	struct qe_ep *ep = &udc->eps[0];
+	struct qe_frame *pframe;
+	unsigned int fsize;
+	u8 *cp;
+
+	pframe = ep->rxframe;
+	if ((frame_get_info(pframe) & PID_SETUP)
+			&& (udc->ep0_state == WAIT_FOR_SETUP)) {
+		fsize = frame_get_length(pframe);
+		if (unlikely(fsize != 8))
+			return -EINVAL;
+		cp = (u8 *)&udc->local_setup_buff;
+		memcpy(cp, pframe->data, fsize);
+		ep->data01 = 1;
+
+		/* handle the usb command based on the usb_ctrlrequest */
+		setup_received_handle(udc, &udc->local_setup_buff);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int qe_ep0_rx(struct qe_udc *udc)
+{
+	struct qe_ep *ep = &udc->eps[0];
+	struct qe_frame *pframe;
+	struct qe_bd __iomem *bd;
+	u32 bdstatus, length;
+	u32 vaddr;
+
+	pframe = ep->rxframe;
+
+	if (ep->dir == USB_DIR_IN) {
+		dev_err(udc->dev, "ep0 not a control endpoint\n");
+		return -EINVAL;
+	}
+
+	bd = ep->n_rxbd;
+	bdstatus = in_be32((u32 __iomem *)bd);
+	length = bdstatus & BD_LENGTH_MASK;
+
+	while (!(bdstatus & R_E) && length) {
+		if ((bdstatus & R_F) && (bdstatus & R_L)
+			&& !(bdstatus & R_ERROR)) {
+			if (length == USB_CRC_SIZE) {
+				udc->ep0_state = WAIT_FOR_SETUP;
+				dev_vdbg(udc->dev,
+					"receive a ZLP in status phase\n");
+			} else {
+				qe_frame_clean(pframe);
+				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
+				frame_set_data(pframe, (u8 *)vaddr);
+				frame_set_length(pframe,
+						(length - USB_CRC_SIZE));
+				frame_set_status(pframe, FRAME_OK);
+				switch (bdstatus & R_PID) {
+				case R_PID_SETUP:
+					frame_set_info(pframe, PID_SETUP);
+					break;
+				case R_PID_DATA1:
+					frame_set_info(pframe, PID_DATA1);
+					break;
+				default:
+					frame_set_info(pframe, PID_DATA0);
+					break;
+				}
+
+				if ((bdstatus & R_PID) == R_PID_SETUP)
+					ep0_setup_handle(udc);
+				else
+					qe_ep_rxframe_handle(ep);
+			}
+		} else {
+			dev_err(udc->dev, "The receive frame with error!\n");
+		}
+
+		/* note: don't clear the rxbd's buffer address */
+		recycle_one_rxbd(ep);
+
+		/* Get next BD */
+		if (bdstatus & R_W)
+			bd = ep->rxbase;
+		else
+			bd++;
+
+		bdstatus = in_be32((u32 __iomem *)bd);
+		length = bdstatus & BD_LENGTH_MASK;
+
+	}
+
+	ep->n_rxbd = bd;
+
+	return 0;
+}
+
+static int qe_ep_rxframe_handle(struct qe_ep *ep)
+{
+	struct qe_frame *pframe;
+	u8 framepid = 0;
+	unsigned int fsize;
+	u8 *cp;
+	struct qe_req *req;
+
+	pframe = ep->rxframe;
+
+	if (frame_get_info(pframe) & PID_DATA1)
+		framepid = 0x1;
+
+	if (framepid != ep->data01) {
+		dev_err(ep->udc->dev, "the data01 error!\n");
+		return -EIO;
+	}
+
+	fsize = frame_get_length(pframe);
+	if (list_empty(&ep->queue)) {
+		dev_err(ep->udc->dev, "the %s have no requeue!\n", ep->name);
+	} else {
+		req = list_entry(ep->queue.next, struct qe_req, queue);
+
+		cp = (u8 *)(req->req.buf) + req->req.actual;
+		if (cp) {
+			memcpy(cp, pframe->data, fsize);
+			req->req.actual += fsize;
+			if ((fsize < ep->ep.maxpacket) ||
+					(req->req.actual >= req->req.length)) {
+				if (ep->epnum == 0)
+					ep0_req_complete(ep->udc, req);
+				else
+					done(ep, req, 0);
+				if (list_empty(&ep->queue) && ep->epnum != 0)
+					qe_eprx_nack(ep);
+			}
+		}
+	}
+
+	qe_ep_toggledata01(ep);
+
+	return 0;
+}
+
+static void ep_rx_tasklet(unsigned long data)
+{
+	struct qe_udc *udc = (struct qe_udc *)data;
+	struct qe_ep *ep;
+	struct qe_frame *pframe;
+	struct qe_bd __iomem *bd;
+	unsigned long flags;
+	u32 bdstatus, length;
+	u32 vaddr, i;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	for (i = 1; i < USB_MAX_ENDPOINTS; i++) {
+		ep = &udc->eps[i];
+
+		if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) {
+			dev_dbg(udc->dev,
+				"This is a transmit ep or disable tasklet!\n");
+			continue;
+		}
+
+		pframe = ep->rxframe;
+		bd = ep->n_rxbd;
+		bdstatus = in_be32((u32 __iomem *)bd);
+		length = bdstatus & BD_LENGTH_MASK;
+
+		while (!(bdstatus & R_E) && length) {
+			if (list_empty(&ep->queue)) {
+				qe_eprx_nack(ep);
+				dev_dbg(udc->dev,
+					"The rxep have noreq %d\n",
+					ep->has_data);
+				break;
+			}
+
+			if ((bdstatus & R_F) && (bdstatus & R_L)
+				&& !(bdstatus & R_ERROR)) {
+				qe_frame_clean(pframe);
+				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
+				frame_set_data(pframe, (u8 *)vaddr);
+				frame_set_length(pframe,
+						(length - USB_CRC_SIZE));
+				frame_set_status(pframe, FRAME_OK);
+				switch (bdstatus & R_PID) {
+				case R_PID_DATA1:
+					frame_set_info(pframe, PID_DATA1);
+					break;
+				case R_PID_SETUP:
+					frame_set_info(pframe, PID_SETUP);
+					break;
+				default:
+					frame_set_info(pframe, PID_DATA0);
+					break;
+				}
+				/* handle the rx frame */
+				qe_ep_rxframe_handle(ep);
+			} else {
+				dev_err(udc->dev,
+					"error in received frame\n");
+			}
+			/* note: don't clear the rxbd's buffer address */
+			/* clear the length */
+			out_be32((u32 __iomem *)bd, bdstatus & BD_STATUS_MASK);
+			ep->has_data--;
+			if (!(ep->localnack))
+				recycle_one_rxbd(ep);
+
+			/* Get next BD */
+			if (bdstatus & R_W)
+				bd = ep->rxbase;
+			else
+				bd++;
+
+			bdstatus = in_be32((u32 __iomem *)bd);
+			length = bdstatus & BD_LENGTH_MASK;
+		}
+
+		ep->n_rxbd = bd;
+
+		if (ep->localnack)
+			ep_recycle_rxbds(ep);
+
+		ep->enable_tasklet = 0;
+	} /* for i=1 */
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+static int qe_ep_rx(struct qe_ep *ep)
+{
+	struct qe_udc *udc;
+	struct qe_frame *pframe;
+	struct qe_bd __iomem *bd;
+	u16 swoffs, ucoffs, emptybds;
+
+	udc = ep->udc;
+	pframe = ep->rxframe;
+
+	if (ep->dir == USB_DIR_IN) {
+		dev_err(udc->dev, "transmit ep in rx function\n");
+		return -EINVAL;
+	}
+
+	bd = ep->n_rxbd;
+
+	swoffs = (u16)(bd - ep->rxbase);
+	ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) -
+			in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3);
+	if (swoffs < ucoffs)
+		emptybds = USB_BDRING_LEN_RX - ucoffs + swoffs;
+	else
+		emptybds = swoffs - ucoffs;
+
+	if (emptybds < MIN_EMPTY_BDS) {
+		qe_eprx_nack(ep);
+		ep->localnack = 1;
+		dev_vdbg(udc->dev, "%d empty bds, send NACK\n", emptybds);
+	}
+	ep->has_data = USB_BDRING_LEN_RX - emptybds;
+
+	if (list_empty(&ep->queue)) {
+		qe_eprx_nack(ep);
+		dev_vdbg(udc->dev, "The rxep have no req queued with %d BDs\n",
+				ep->has_data);
+		return 0;
+	}
+
+	tasklet_schedule(&udc->rx_tasklet);
+	ep->enable_tasklet = 1;
+
+	return 0;
+}
+
+/* send the data in a frame, regardless of the tx_req */
+static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
+{
+	struct qe_udc *udc = ep->udc;
+	struct qe_bd __iomem *bd;
+	u16 saveusbmr;
+	u32 bdstatus, pidmask;
+	u32 paddr;
+
+	if (ep->dir == USB_DIR_OUT) {
+		dev_err(udc->dev, "receive ep passed to tx function\n");
+		return -EINVAL;
+	}
+
+	/* Disable the Tx interrupt */
+	saveusbmr = in_be16(&udc->usb_regs->usb_usbmr);
+	out_be16(&udc->usb_regs->usb_usbmr,
+			saveusbmr & ~(USB_E_TXB_MASK | USB_E_TXE_MASK));
+
+	bd = ep->n_txbd;
+	bdstatus = in_be32((u32 __iomem *)bd);
+
+	if (!(bdstatus & (T_R | BD_LENGTH_MASK))) {
+		if (frame_get_length(frame) == 0) {
+			frame_set_data(frame, udc->nullbuf);
+			frame_set_length(frame, 2);
+			frame->info |= (ZLP | NO_CRC);
+			dev_vdbg(udc->dev, "the frame size = 0\n");
+		}
+		paddr = virt_to_phys((void *)frame->data);
+		out_be32(&bd->buf, paddr);
+		bdstatus = (bdstatus&T_W);
+		if (!(frame_get_info(frame) & NO_CRC))
+			bdstatus |= T_R | T_I | T_L | T_TC
+					| frame_get_length(frame);
+		else
+			bdstatus |= T_R | T_I | T_L | frame_get_length(frame);
+
+		/* if the packet is a ZLP in status phase */
+		if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP))
+			ep->data01 = 0x1;
+
+		if (ep->data01) {
+			pidmask = T_PID_DATA1;
+			frame->info |= PID_DATA1;
+		} else {
+			pidmask = T_PID_DATA0;
+			frame->info |= PID_DATA0;
+		}
+		bdstatus |= T_CNF;
+		bdstatus |= pidmask;
+		out_be32((u32 __iomem *)bd, bdstatus);
+		qe_ep_filltxfifo(ep);
+
+		/* enable the TX interrupt */
+		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
+
+		qe_ep_toggledata01(ep);
+		if (bdstatus & T_W)
+			ep->n_txbd = ep->txbase;
+		else
+			ep->n_txbd++;
+
+		return 0;
+	} else {
+		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
+		dev_vdbg(udc->dev, "The tx bd is not ready!\n");
+		return -EBUSY;
+	}
+}
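+
+/* After a BD is successfully queued above, the DATA0/DATA1 toggle is
+ * flipped via qe_ep_toggledata01(); a ZLP sent in the ep0 status phase is
+ * forced to DATA1 first, as required for control transfers. */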
+
+/* when a bd has been transmitted, this function handles
+ * the tx_req; ep0 is not included */
+static int txcomplete(struct qe_ep *ep, unsigned char restart)
+{
+	if (ep->tx_req != NULL) {
+		struct qe_req *req = ep->tx_req;
+		unsigned zlp = 0, last_len = 0;
+
+		last_len = min_t(unsigned, req->req.length - ep->sent,
+				ep->ep.maxpacket);
+
+		if (!restart) {
+			int asent = ep->last;
+			ep->sent += asent;
+			ep->last -= asent;
+		} else {
+			ep->last = 0;
+		}
+
+		/* zlp needed when req->req.zero is set */
+		if (req->req.zero) {
+			if (last_len == 0 ||
+				(req->req.length % ep->ep.maxpacket) != 0)
+				zlp = 0;
+			else
+				zlp = 1;
+		} else
+			zlp = 0;
+
+		/* the request has already been transmitted completely */
+		if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
+			done(ep, ep->tx_req, 0);
+			ep->tx_req = NULL;
+			ep->last = 0;
+			ep->sent = 0;
+		}
+	}
+
+	/* we should gain a new tx_req for this endpoint */
+	if (ep->tx_req == NULL) {
+		if (!list_empty(&ep->queue)) {
+			ep->tx_req = list_entry(ep->queue.next,	struct qe_req,
+							queue);
+			ep->last = 0;
+			ep->sent = 0;
+		}
+	}
+
+	return 0;
+}
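+
+/* ZLP rule used above: when req->req.zero is set and the request length is
+ * a non-zero exact multiple of maxpacket, one extra zero-length packet is
+ * sent so the host can detect the end of the transfer. */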
+
+/* given a frame and a tx_req, send some data */
+static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
+{
+	unsigned int size;
+	u8 *buf;
+
+	qe_frame_clean(frame);
+	size = min_t(u32, (ep->tx_req->req.length - ep->sent),
+				ep->ep.maxpacket);
+	buf = (u8 *)ep->tx_req->req.buf + ep->sent;
+	if (buf && size) {
+		ep->last = size;
+		ep->tx_req->req.actual += size;
+		frame_set_data(frame, buf);
+		frame_set_length(frame, size);
+		frame_set_status(frame, FRAME_OK);
+		frame_set_info(frame, 0);
+		return qe_ep_tx(ep, frame);
+	}
+	return -EIO;
+}
+
+/* given a frame struct, send a ZLP */
+static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor)
+{
+	struct qe_udc *udc = ep->udc;
+
+	if (frame == NULL)
+		return -ENODEV;
+
+	qe_frame_clean(frame);
+	frame_set_data(frame, (u8 *)udc->nullbuf);
+	frame_set_length(frame, 2);
+	frame_set_status(frame, FRAME_OK);
+	frame_set_info(frame, (ZLP | NO_CRC | infor));
+
+	return qe_ep_tx(ep, frame);
+}
+
+static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame)
+{
+	struct qe_req *req = ep->tx_req;
+	int reval;
+
+	if (req == NULL)
+		return -ENODEV;
+
+	if ((req->req.length - ep->sent) > 0)
+		reval = qe_usb_senddata(ep, frame);
+	else
+		reval = sendnulldata(ep, frame, 0);
+
+	return reval;
+}
+
+/* if direction is DIR_IN, the status stage is Device->Host;
+ * if direction is DIR_OUT, the status transaction is Device<-Host.
+ * In the status phase the udc creates a request and gains status. */
+static int ep0_prime_status(struct qe_udc *udc, int direction)
+{
+
+	struct qe_ep *ep = &udc->eps[0];
+
+	if (direction == USB_DIR_IN) {
+		udc->ep0_state = DATA_STATE_NEED_ZLP;
+		udc->ep0_dir = USB_DIR_IN;
+		sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
+	} else {
+		udc->ep0_dir = USB_DIR_OUT;
+		udc->ep0_state = WAIT_FOR_OUT_STATUS;
+	}
+
+	return 0;
+}
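+
+/* The status stage runs opposite to the data stage: an IN status is primed
+ * by sending a ZLP from the device, while an OUT status simply waits for
+ * the host's ZLP (WAIT_FOR_OUT_STATUS). */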
+
+/* a request completed on ep0, whether a gadget request or a udc request */
+static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req)
+{
+	struct qe_ep *ep = &udc->eps[0];
+	/* because usb and ep's status have already been set in ch9setaddress() */
+
+	switch (udc->ep0_state) {
+	case DATA_STATE_XMIT:
+		done(ep, req, 0);
+		/* receive status phase */
+		if (ep0_prime_status(udc, USB_DIR_OUT))
+			qe_ep0_stall(udc);
+		break;
+
+	case DATA_STATE_NEED_ZLP:
+		done(ep, req, 0);
+		udc->ep0_state = WAIT_FOR_SETUP;
+		break;
+
+	case DATA_STATE_RECV:
+		done(ep, req, 0);
+		/* send status phase */
+		if (ep0_prime_status(udc, USB_DIR_IN))
+			qe_ep0_stall(udc);
+		break;
+
+	case WAIT_FOR_OUT_STATUS:
+		done(ep, req, 0);
+		udc->ep0_state = WAIT_FOR_SETUP;
+		break;
+
+	case WAIT_FOR_SETUP:
+		dev_vdbg(udc->dev, "Unexpected interrupt\n");
+		break;
+
+	default:
+		qe_ep0_stall(udc);
+		break;
+	}
+}
+
+static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart)
+{
+	struct qe_req *tx_req = NULL;
+	struct qe_frame *frame = ep->txframe;
+
+	if ((frame_get_info(frame) & (ZLP | NO_REQ)) == (ZLP | NO_REQ)) {
+		if (!restart)
+			ep->udc->ep0_state = WAIT_FOR_SETUP;
+		else
+			sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
+		return 0;
+	}
+
+	tx_req = ep->tx_req;
+	if (tx_req != NULL) {
+		if (!restart) {
+			int asent = ep->last;
+			ep->sent += asent;
+			ep->last -= asent;
+		} else {
+			ep->last = 0;
+		}
+
+		/* the request has already been transmitted completely */
+		if ((ep->tx_req->req.length - ep->sent) <= 0) {
+			ep->tx_req->req.actual = (unsigned int)ep->sent;
+			ep0_req_complete(ep->udc, ep->tx_req);
+			ep->tx_req = NULL;
+			ep->last = 0;
+			ep->sent = 0;
+		}
+	} else {
+		dev_vdbg(ep->udc->dev, "the ep0_controller have no req\n");
+	}
+
+	return 0;
+}
+
+static int ep0_txframe_handle(struct qe_ep *ep)
+{
+	/* if there was an error, transmit again */
+	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
+		qe_ep_flushtxfifo(ep);
+		dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n");
+		if (frame_get_info(ep->txframe) & PID_DATA0)
+			ep->data01 = 0;
+		else
+			ep->data01 = 1;
+
+		ep0_txcomplete(ep, 1);
+	} else
+		ep0_txcomplete(ep, 0);
+
+	frame_create_tx(ep, ep->txframe);
+	return 0;
+}
+
+static int qe_ep0_txconf(struct qe_ep *ep)
+{
+	struct qe_bd __iomem *bd;
+	struct qe_frame *pframe;
+	u32 bdstatus;
+
+	bd = ep->c_txbd;
+	bdstatus = in_be32((u32 __iomem *)bd);
+	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
+		pframe = ep->txframe;
+
+		/* clear and recycle the BD */
+		out_be32((u32 __iomem *)bd, bdstatus & T_W);
+		out_be32(&bd->buf, 0);
+		if (bdstatus & T_W)
+			ep->c_txbd = ep->txbase;
+		else
+			ep->c_txbd++;
+
+		if (ep->c_txbd == ep->n_txbd) {
+			if (bdstatus & DEVICE_T_ERROR) {
+				frame_set_status(pframe, FRAME_ERROR);
+				if (bdstatus & T_TO)
+					pframe->status |= TX_ER_TIMEOUT;
+				if (bdstatus & T_UN)
+					pframe->status |= TX_ER_UNDERUN;
+			}
+			ep0_txframe_handle(ep);
+		}
+
+		bd = ep->c_txbd;
+		bdstatus = in_be32((u32 __iomem *)bd);
+	}
+
+	return 0;
+}
+
+static int ep_txframe_handle(struct qe_ep *ep)
+{
+	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
+		qe_ep_flushtxfifo(ep);
+		dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n");
+		if (frame_get_info(ep->txframe) & PID_DATA0)
+			ep->data01 = 0;
+		else
+			ep->data01 = 1;
+
+		txcomplete(ep, 1);
+	} else
+		txcomplete(ep, 0);
+
+	frame_create_tx(ep, ep->txframe); /* send the data */
+	return 0;
+}
+
+/* confirm the already transmitted bd */
+static int qe_ep_txconf(struct qe_ep *ep)
+{
+	struct qe_bd __iomem *bd;
+	struct qe_frame *pframe = NULL;
+	u32 bdstatus;
+	unsigned char breakonrxinterrupt = 0;
+
+	bd = ep->c_txbd;
+	bdstatus = in_be32((u32 __iomem *)bd);
+	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
+		pframe = ep->txframe;
+		if (bdstatus & DEVICE_T_ERROR) {
+			frame_set_status(pframe, FRAME_ERROR);
+			if (bdstatus & T_TO)
+				pframe->status |= TX_ER_TIMEOUT;
+			if (bdstatus & T_UN)
+				pframe->status |= TX_ER_UNDERUN;
+		}
+
+		/* clear and recycle the BD */
+		out_be32((u32 __iomem *)bd, bdstatus & T_W);
+		out_be32(&bd->buf, 0);
+		if (bdstatus & T_W)
+			ep->c_txbd = ep->txbase;
+		else
+			ep->c_txbd++;
+
+		/* handle the tx frame */
+		ep_txframe_handle(ep);
+		bd = ep->c_txbd;
+		bdstatus = in_be32((u32 __iomem *)bd);
+	}
+	if (breakonrxinterrupt)
+		return -EIO;
+	else
+		return 0;
+}
+
+/* Add a request in queue, and try to transmit a packet */
+static int ep_req_send(struct qe_ep *ep, struct qe_req *req)
+{
+	int reval = 0;
+
+	if (ep->tx_req == NULL) {
+		ep->sent = 0;
+		ep->last = 0;
+		txcomplete(ep, 0); /* can gain a new tx_req */
+		reval = frame_create_tx(ep, ep->txframe);
+	}
+	return reval;
+}
+
+/* Maybe this is a good idea */
+static int ep_req_rx(struct qe_ep *ep, struct qe_req *req)
+{
+	struct qe_udc *udc = ep->udc;
+	struct qe_frame *pframe = NULL;
+	struct qe_bd __iomem *bd;
+	u32 bdstatus, length;
+	u32 vaddr, fsize;
+	u8 *cp;
+	u8 finish_req = 0;
+	u8 framepid;
+
+	if (list_empty(&ep->queue)) {
+		dev_vdbg(udc->dev, "the req already finish!\n");
+		return 0;
+	}
+	pframe = ep->rxframe;
+
+	bd = ep->n_rxbd;
+	bdstatus = in_be32((u32 __iomem *)bd);
+	length = bdstatus & BD_LENGTH_MASK;
+
+	while (!(bdstatus & R_E) && length) {
+		if (finish_req)
+			break;
+		if ((bdstatus & R_F) && (bdstatus & R_L)
+					&& !(bdstatus & R_ERROR)) {
+			qe_frame_clean(pframe);
+			vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
+			frame_set_data(pframe, (u8 *)vaddr);
+			frame_set_length(pframe, (length - USB_CRC_SIZE));
+			frame_set_status(pframe, FRAME_OK);
+			switch (bdstatus & R_PID) {
+			case R_PID_DATA1:
+				frame_set_info(pframe, PID_DATA1); break;
+			default:
+				frame_set_info(pframe, PID_DATA0); break;
+			}
+			/* handle the rx frame */
+
+			if (frame_get_info(pframe) & PID_DATA1)
+				framepid = 0x1;
+			else
+				framepid = 0;
+
+			if (framepid != ep->data01) {
+				dev_vdbg(udc->dev, "the data01 toggle is wrong!\n");
+			} else {
+				fsize = frame_get_length(pframe);
+
+				cp = (u8 *)(req->req.buf) + req->req.actual;
+				if (cp) {
+					memcpy(cp, pframe->data, fsize);
+					req->req.actual += fsize;
+					if ((fsize < ep->ep.maxpacket)
+						|| (req->req.actual >=
+							req->req.length)) {
+						finish_req = 1;
+						done(ep, req, 0);
+						if (list_empty(&ep->queue))
+							qe_eprx_nack(ep);
+					}
+				}
+				qe_ep_toggledata01(ep);
+			}
+		} else {
+			dev_err(udc->dev, "The received frame has an error!\n");
+		}
+
+		/* note: don't clear the rxbd's buffer address,
+		 * only clear the length */
+		out_be32((u32 __iomem *)bd, (bdstatus & BD_STATUS_MASK));
+		ep->has_data--;
+
+		/* Get next BD */
+		if (bdstatus & R_W)
+			bd = ep->rxbase;
+		else
+			bd++;
+
+		bdstatus = in_be32((u32 __iomem *)bd);
+		length = bdstatus & BD_LENGTH_MASK;
+	}
+
+	ep->n_rxbd = bd;
+	ep_recycle_rxbds(ep);
+
+	return 0;
+}
+
+/* the request was already queued by the caller; start or resume reception */
+static int ep_req_receive(struct qe_ep *ep, struct qe_req *req)
+{
+	if (ep->state == EP_STATE_NACK) {
+		if (ep->has_data <= 0) {
+			/* Enable rx and unmask rx interrupt */
+			qe_eprx_normal(ep);
+		} else {
+			/* Copy the existing BD data */
+			ep_req_rx(ep, req);
+		}
+	}
+
+	return 0;
+}
+
+/********************************************************************
+	Internally Used Functions End
+********************************************************************/
+
+/*-----------------------------------------------------------------------
+	Endpoint Management Functions For Gadget
+ -----------------------------------------------------------------------*/
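+/*
+ * Enable an endpoint for the gadget driver.  The BD rings and the
+ * hardware endpoint are set up in qe_ep_init(); on failure the rx BD
+ * ring MURAM is released again.
+ */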
+static int qe_ep_enable(struct usb_ep *_ep,
+			 const struct usb_endpoint_descriptor *desc)
+{
+	struct qe_udc *udc;
+	struct qe_ep *ep;
+	int retval = 0;
+	unsigned char epnum;
+
+	ep = container_of(_ep, struct qe_ep, ep);
+
+	/* catch various bogus parameters */
+	if (!_ep || !desc || ep->desc || _ep->name == ep_name[0] ||
+			(desc->bDescriptorType != USB_DT_ENDPOINT))
+		return -EINVAL;
+
+	udc = ep->udc;
+	if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+
+	epnum = (u8)desc->bEndpointAddress & 0xF;
+
+	retval = qe_ep_init(udc, epnum, desc);
+	if (retval != 0) {
+		cpm_muram_free(cpm_muram_offset(ep->rxbase));
+		dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum);
+		return -EINVAL;
+	}
+	dev_dbg(udc->dev, "enable ep%d successful\n", ep->epnum);
+	return 0;
+}
+
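+/*
+ * Disable an endpoint: complete all pending requests with -ESHUTDOWN,
+ * reset the hardware endpoint and free the BD ring MURAM, the frame
+ * descriptors and the rx DMA buffer set up by qe_ep_init().
+ */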
+static int qe_ep_disable(struct usb_ep *_ep)
+{
+	struct qe_udc *udc;
+	struct qe_ep *ep;
+	unsigned long flags;
+	unsigned int size;
+
+	ep = container_of(_ep, struct qe_ep, ep);
+	udc = ep->udc;
+
+	if (!_ep || !ep->desc) {
+		dev_dbg(udc->dev, "%s not enabled\n", _ep ? ep->ep.name : NULL);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+	/* Nuke all pending requests (does flush) */
+	nuke(ep, -ESHUTDOWN);
+	ep->desc = NULL;
+	ep->ep.desc = NULL;
+	ep->stopped = 1;
+	ep->tx_req = NULL;
+	qe_ep_reset(udc, ep->epnum);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	cpm_muram_free(cpm_muram_offset(ep->rxbase));
+
+	if (ep->dir == USB_DIR_OUT)
+		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
+				(USB_BDRING_LEN_RX + 1);
+	else
+		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
+				(USB_BDRING_LEN + 1);
+
+	if (ep->dir != USB_DIR_IN) {
+		kfree(ep->rxframe);
+		if (ep->rxbufmap) {
+			dma_unmap_single(udc_controller->gadget.dev.parent,
+					ep->rxbuf_d, size,
+					DMA_FROM_DEVICE);
+			ep->rxbuf_d = DMA_ADDR_INVALID;
+		} else {
+			dma_sync_single_for_cpu(
+					udc_controller->gadget.dev.parent,
+					ep->rxbuf_d, size,
+					DMA_FROM_DEVICE);
+		}
+		kfree(ep->rxbuffer);
+	}
+
+	if (ep->dir != USB_DIR_OUT)
+		kfree(ep->txframe);
+
+	dev_dbg(udc->dev, "disabled %s OK\n", _ep->name);
+	return 0;
+}
+
+static struct usb_request *qe_alloc_request(struct usb_ep *_ep,	gfp_t gfp_flags)
+{
+	struct qe_req *req;
+
+	req = kzalloc(sizeof(*req), gfp_flags);
+	if (!req)
+		return NULL;
+
+	req->req.dma = DMA_ADDR_INVALID;
+
+	INIT_LIST_HEAD(&req->queue);
+
+	return &req->req;
+}
+
+static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct qe_req *req;
+
+	req = container_of(_req, struct qe_req, req);
+
+	if (_req)
+		kfree(req);
+}
+
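+/*
+ * Core of ep_queue(); the caller holds udc->lock.  Map or sync the
+ * request buffer for DMA, add the request to the endpoint queue and
+ * start the transfer in the direction of the endpoint.
+ */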
+static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
+	struct qe_req *req = container_of(_req, struct qe_req, req);
+	struct qe_udc *udc;
+	int reval;
+
+	udc = ep->udc;
+	/* catch various bogus parameters */
+	if (!_req || !req->req.complete || !req->req.buf
+			|| !list_empty(&req->queue)) {
+		dev_dbg(udc->dev, "bad params\n");
+		return -EINVAL;
+	}
+	if (!_ep || (!ep->desc && ep_index(ep))) {
+		dev_dbg(udc->dev, "bad ep\n");
+		return -EINVAL;
+	}
+
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	req->ep = ep;
+
+	/* map virtual address to hardware */
+	if (req->req.dma == DMA_ADDR_INVALID) {
+		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+					req->req.buf,
+					req->req.length,
+					ep_is_in(ep)
+					? DMA_TO_DEVICE :
+					DMA_FROM_DEVICE);
+		req->mapped = 1;
+	} else {
+		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
+					req->req.dma, req->req.length,
+					ep_is_in(ep)
+					? DMA_TO_DEVICE :
+					DMA_FROM_DEVICE);
+		req->mapped = 0;
+	}
+
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+
+	list_add_tail(&req->queue, &ep->queue);
+	dev_vdbg(udc->dev, "gadget has a request in %s! %d\n",
+			ep->name, req->req.length);
+
+	/* push the request to device */
+	if (ep_is_in(ep))
+		reval = ep_req_send(ep, req);
+
+	/* EP0 */
+	if (ep_index(ep) == 0 && req->req.length > 0) {
+		if (ep_is_in(ep))
+			udc->ep0_state = DATA_STATE_XMIT;
+		else
+			udc->ep0_state = DATA_STATE_RECV;
+	}
+
+	if (ep->dir == USB_DIR_OUT)
+		reval = ep_req_receive(ep, req);
+
+	return 0;
+}
+
+/* queues (submits) an I/O request to an endpoint */
+static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+		       gfp_t gfp_flags)
+{
+	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
+	struct qe_udc *udc = ep->udc;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	ret = __qe_ep_queue(_ep, _req);
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return ret;
+}
+
+/* dequeues (cancels, unlinks) an I/O request from an endpoint */
+static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
+	struct qe_req *req;
+	unsigned long flags;
+
+	if (!_ep || !_req)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+
+	if (&req->req != _req) {
+		spin_unlock_irqrestore(&ep->udc->lock, flags);
+		return -EINVAL;
+	}
+
+	done(ep, req, -ECONNRESET);
+
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+	return 0;
+}
+
+/*-----------------------------------------------------------------
+ * modify the endpoint halt feature
+ * @ep: the non-isochronous endpoint being stalled
+ * @value: 1--set halt  0--clear halt
+ * Returns zero, or a negative error code.
+*----------------------------------------------------------------*/
+static int qe_ep_set_halt(struct usb_ep *_ep, int value)
+{
+	struct qe_ep *ep;
+	unsigned long flags;
+	int status = -EOPNOTSUPP;
+	struct qe_udc *udc;
+
+	ep = container_of(_ep, struct qe_ep, ep);
+	if (!_ep || !ep->desc) {
+		status = -EINVAL;
+		goto out;
+	}
+
+	udc = ep->udc;
+	/* An attempt to halt an IN ep will fail if any transfer requests
+	 * are still queued */
+	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
+		status = -EAGAIN;
+		goto out;
+	}
+
+	status = 0;
+	spin_lock_irqsave(&ep->udc->lock, flags);
+	qe_eptx_stall_change(ep, value);
+	qe_eprx_stall_change(ep, value);
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+	if (ep->epnum == 0) {
+		udc->ep0_state = WAIT_FOR_SETUP;
+		udc->ep0_dir = 0;
+	}
+
+	/* set data toggle to DATA0 on clear halt */
+	if (value == 0)
+		ep->data01 = 0;
+out:
+	dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name,
+			value ?  "set" : "clear", status);
+
+	return status;
+}
+
+static struct usb_ep_ops qe_ep_ops = {
+	.enable = qe_ep_enable,
+	.disable = qe_ep_disable,
+
+	.alloc_request = qe_alloc_request,
+	.free_request = qe_free_request,
+
+	.queue = qe_ep_queue,
+	.dequeue = qe_ep_dequeue,
+
+	.set_halt = qe_ep_set_halt,
+};
+
+/*------------------------------------------------------------------------
+	Gadget Driver Layer Operations
+ ------------------------------------------------------------------------*/
+
+/* Get the current frame number */
+static int qe_get_frame(struct usb_gadget *gadget)
+{
+	u16 tmp;
+
+	tmp = in_be16(&udc_controller->usb_param->frame_n);
+	if (tmp & 0x8000)
+		tmp = tmp & 0x07ff;
+	else
+		tmp = -EINVAL;
+
+	return (int)tmp;
+}
+
+/* Tries to wake up the host connected to this gadget
+ *
+ * Return : 0-success
+ * Negative-this feature not enabled by host or not supported by device hw
+ */
+static int qe_wakeup(struct usb_gadget *gadget)
+{
+	return -ENOTSUPP;
+}
+
+/* Notify the controller that VBUS is powered; called by whatever
+   detects VBUS sessions */
+static int qe_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	return -ENOTSUPP;
+}
+
+/* constrain controller's VBUS power usage
+ * This call is used by gadget drivers during SET_CONFIGURATION calls,
+ * reporting how much power the device may consume.  For example, this
+ * could affect how quickly batteries are recharged.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static int qe_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+	return -ENOTSUPP;
+}
+
+/* Change Data+ pullup status
+ * this func is used by usb_gadget_connect/disconnect
+ */
+static int qe_pullup(struct usb_gadget *gadget, int is_on)
+{
+	return -ENOTSUPP;
+}
+
+static int fsl_qe_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *));
+static int fsl_qe_stop(struct usb_gadget_driver *driver);
+
+/* defined in usb_gadget.h */
+static struct usb_gadget_ops qe_gadget_ops = {
+	.get_frame = qe_get_frame,
+	.wakeup = qe_wakeup,
+/*	.set_selfpowered = qe_set_selfpowered,*/ /* always selfpowered */
+	.vbus_session = qe_vbus_session,
+	.vbus_draw = qe_vbus_draw,
+	.pullup = qe_pullup,
+	.start = fsl_qe_start,
+	.stop = fsl_qe_stop,
+};
+
+/*-------------------------------------------------------------------------
+	USB ep0 Setup process in BUS Enumeration
+ -------------------------------------------------------------------------*/
+static int udc_reset_ep_queue(struct qe_udc *udc, u8 pipe)
+{
+	struct qe_ep *ep = &udc->eps[pipe];
+
+	nuke(ep, -ECONNRESET);
+	ep->tx_req = NULL;
+	return 0;
+}
+
+static int reset_queues(struct qe_udc *udc)
+{
+	u8 pipe;
+
+	for (pipe = 0; pipe < USB_MAX_ENDPOINTS; pipe++)
+		udc_reset_ep_queue(udc, pipe);
+
+	/* report disconnect; the driver is already quiesced */
+	spin_unlock(&udc->lock);
+	udc->driver->disconnect(&udc->gadget);
+	spin_lock(&udc->lock);
+
+	return 0;
+}
+
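+/*
+ * SET_ADDRESS: the address is only latched here; it is written to the
+ * USADR register from tx_irq() once the status-stage ZLP has gone out.
+ */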
+static void ch9setaddress(struct qe_udc *udc, u16 value, u16 index,
+			u16 length)
+{
+	/* Save the new address to device struct */
+	udc->device_address = (u8) value;
+	/* Update usb state */
+	udc->usb_state = USB_STATE_ADDRESS;
+
+	/* Status phase: send a ZLP */
+	if (ep0_prime_status(udc, USB_DIR_IN))
+		qe_ep0_stall(udc);
+}
+
+static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct qe_req *req = container_of(_req, struct qe_req, req);
+
+	req->req.buf = NULL;
+	kfree(req);
+}
+
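+/*
+ * GET_STATUS: build the two-byte status word for the device, interface
+ * or endpoint recipient and queue it on ep0 for the IN data phase;
+ * stall ep0 if the request cannot be answered.
+ */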
+static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
+			u16 index, u16 length)
+{
+	u16 usb_status = 0;
+	struct qe_req *req;
+	struct qe_ep *ep;
+	int status = 0;
+
+	ep = &udc->eps[0];
+	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+		/* Get device status */
+		usb_status = 1 << USB_DEVICE_SELF_POWERED;
+	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
+		/* Get interface status */
+		/* We don't have interface information in udc driver */
+		usb_status = 0;
+	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
+		/* Get endpoint status */
+		int pipe = index & USB_ENDPOINT_NUMBER_MASK;
+		struct qe_ep *target_ep = &udc->eps[pipe];
+		u16 usep;
+
+		/* stall if endpoint doesn't exist */
+		if (!target_ep->desc)
+			goto stall;
+
+		usep = in_be16(&udc->usb_regs->usb_usep[pipe]);
+		if (index & USB_DIR_IN) {
+			if (target_ep->dir != USB_DIR_IN)
+				goto stall;
+			if ((usep & USB_THS_MASK) == USB_THS_STALL)
+				usb_status = 1 << USB_ENDPOINT_HALT;
+		} else {
+			if (target_ep->dir != USB_DIR_OUT)
+				goto stall;
+			if ((usep & USB_RHS_MASK) == USB_RHS_STALL)
+				usb_status = 1 << USB_ENDPOINT_HALT;
+		}
+	}
+
+	req = container_of(qe_alloc_request(&ep->ep, GFP_KERNEL),
+					struct qe_req, req);
+	req->req.length = 2;
+	req->req.buf = udc->statusbuf;
+	*(u16 *)req->req.buf = cpu_to_le16(usb_status);
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->req.complete = ownercomplete;
+
+	udc->ep0_dir = USB_DIR_IN;
+
+	/* data phase */
+	status = __qe_ep_queue(&ep->ep, &req->req);
+
+	if (status == 0)
+		return;
+stall:
+	dev_err(udc->dev, "Can't respond to getstatus request\n");
+	qe_ep0_stall(udc);
+}
+
+/* only handle the setup request, assuming the device is in the normal state */
+static void setup_received_handle(struct qe_udc *udc,
+				struct usb_ctrlrequest *setup)
+{
+	/* Fix endianness (udc->local_setup_buff is CPU-endian now) */
+	u16 wValue = le16_to_cpu(setup->wValue);
+	u16 wIndex = le16_to_cpu(setup->wIndex);
+	u16 wLength = le16_to_cpu(setup->wLength);
+
+	/* clear the previous request in the ep0 */
+	udc_reset_ep_queue(udc, 0);
+
+	if (setup->bRequestType & USB_DIR_IN)
+		udc->ep0_dir = USB_DIR_IN;
+	else
+		udc->ep0_dir = USB_DIR_OUT;
+
+	switch (setup->bRequest) {
+	case USB_REQ_GET_STATUS:
+		/* Data+Status phase from udc */
+		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+					!= (USB_DIR_IN | USB_TYPE_STANDARD))
+			break;
+		ch9getstatus(udc, setup->bRequestType, wValue, wIndex,
+					wLength);
+		return;
+
+	case USB_REQ_SET_ADDRESS:
+		/* Status phase from udc */
+		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
+						USB_RECIP_DEVICE))
+			break;
+		ch9setaddress(udc, wValue, wIndex, wLength);
+		return;
+
+	case USB_REQ_CLEAR_FEATURE:
+	case USB_REQ_SET_FEATURE:
+		/* Requests with no data phase, status phase from udc */
+		if ((setup->bRequestType & USB_TYPE_MASK)
+					!= USB_TYPE_STANDARD)
+			break;
+
+		if ((setup->bRequestType & USB_RECIP_MASK)
+				== USB_RECIP_ENDPOINT) {
+			int pipe = wIndex & USB_ENDPOINT_NUMBER_MASK;
+			struct qe_ep *ep;
+
+			if (wValue != 0 || wLength != 0
+				|| pipe > USB_MAX_ENDPOINTS)
+				break;
+			ep = &udc->eps[pipe];
+
+			spin_unlock(&udc->lock);
+			qe_ep_set_halt(&ep->ep,
+					(setup->bRequest == USB_REQ_SET_FEATURE)
+						? 1 : 0);
+			spin_lock(&udc->lock);
+		}
+
+		ep0_prime_status(udc, USB_DIR_IN);
+
+		return;
+
+	default:
+		break;
+	}
+
+	if (wLength) {
+		/* Data phase from gadget, status phase from udc */
+		if (setup->bRequestType & USB_DIR_IN) {
+			udc->ep0_state = DATA_STATE_XMIT;
+			udc->ep0_dir = USB_DIR_IN;
+		} else {
+			udc->ep0_state = DATA_STATE_RECV;
+			udc->ep0_dir = USB_DIR_OUT;
+		}
+		spin_unlock(&udc->lock);
+		if (udc->driver->setup(&udc->gadget,
+					&udc->local_setup_buff) < 0)
+			qe_ep0_stall(udc);
+		spin_lock(&udc->lock);
+	} else {
+		/* No data phase, IN status from gadget */
+		udc->ep0_dir = USB_DIR_IN;
+		spin_unlock(&udc->lock);
+		if (udc->driver->setup(&udc->gadget,
+					&udc->local_setup_buff) < 0)
+			qe_ep0_stall(udc);
+		spin_lock(&udc->lock);
+		udc->ep0_state = DATA_STATE_NEED_ZLP;
+	}
+}
+
+/*-------------------------------------------------------------------------
+	USB Interrupt handlers
+ -------------------------------------------------------------------------*/
+static void suspend_irq(struct qe_udc *udc)
+{
+	udc->resume_state = udc->usb_state;
+	udc->usb_state = USB_STATE_SUSPENDED;
+
+	/* report suspend to the driver; serial.c does not support this */
+	if (udc->driver->suspend)
+		udc->driver->suspend(&udc->gadget);
+}
+
+static void resume_irq(struct qe_udc *udc)
+{
+	udc->usb_state = udc->resume_state;
+	udc->resume_state = 0;
+
+	/* report resume to the driver; serial.c does not support this */
+	if (udc->driver->resume)
+		udc->driver->resume(&udc->gadget);
+}
+
+static void idle_irq(struct qe_udc *udc)
+{
+	u8 usbs;
+
+	usbs = in_8(&udc->usb_regs->usb_usbs);
+	if (usbs & USB_IDLE_STATUS_MASK) {
+		if ((udc->usb_state) != USB_STATE_SUSPENDED)
+			suspend_irq(udc);
+	} else {
+		if (udc->usb_state == USB_STATE_SUSPENDED)
+			resume_irq(udc);
+	}
+}
+
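+/*
+ * Bus reset: unless the device is already in the DEFAULT state, disable
+ * the controller, clear the device address, reset every initialized
+ * endpoint, flush all queues and go back to waiting for SETUP.
+ */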
+static int reset_irq(struct qe_udc *udc)
+{
+	unsigned char i;
+
+	if (udc->usb_state == USB_STATE_DEFAULT)
+		return 0;
+
+	qe_usb_disable();
+	out_8(&udc->usb_regs->usb_usadr, 0);
+
+	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
+		if (udc->eps[i].init)
+			qe_ep_reset(udc, i);
+	}
+
+	reset_queues(udc);
+	udc->usb_state = USB_STATE_DEFAULT;
+	udc->ep0_state = WAIT_FOR_SETUP;
+	udc->ep0_dir = USB_DIR_OUT;
+	qe_usb_enable();
+	return 0;
+}
+
+static int bsy_irq(struct qe_udc *udc)
+{
+	return 0;
+}
+
+static int txe_irq(struct qe_udc *udc)
+{
+	return 0;
+}
+
+/* the ep0 tx interrupt is also handled here */
+static int tx_irq(struct qe_udc *udc)
+{
+	struct qe_ep *ep;
+	struct qe_bd __iomem *bd;
+	int i, res = 0;
+
+	if ((udc->usb_state == USB_STATE_ADDRESS)
+		&& (in_8(&udc->usb_regs->usb_usadr) == 0))
+		out_8(&udc->usb_regs->usb_usadr, udc->device_address);
+
+	for (i = (USB_MAX_ENDPOINTS-1); ((i >= 0) && (res == 0)); i--) {
+		ep = &udc->eps[i];
+		if (ep && ep->init && (ep->dir != USB_DIR_OUT)) {
+			bd = ep->c_txbd;
+			if (!(in_be32((u32 __iomem *)bd) & T_R)
+						&& (in_be32(&bd->buf))) {
+				/* confirm the transmitted bd */
+				if (ep->epnum == 0)
+					res = qe_ep0_txconf(ep);
+				else
+					res = qe_ep_txconf(ep);
+			}
+		}
+	}
+	return res;
+}
+
+
+/* setup packet rx is also handled in this function */
+static void rx_irq(struct qe_udc *udc)
+{
+	struct qe_ep *ep;
+	struct qe_bd __iomem *bd;
+	int i;
+
+	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
+		ep = &udc->eps[i];
+		if (ep && ep->init && (ep->dir != USB_DIR_IN)) {
+			bd = ep->n_rxbd;
+			if (!(in_be32((u32 __iomem *)bd) & R_E)
+						&& (in_be32(&bd->buf))) {
+				if (ep->epnum == 0) {
+					qe_ep0_rx(udc);
+				} else {
+					/* non-setup packet receive */
+					qe_ep_rx(ep);
+				}
+			}
+		}
+	}
+}
+
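+/*
+ * Top-level interrupt handler: read the pending events masked by USBMR,
+ * acknowledge them in USBER and dispatch to the idle/tx/rx/reset/busy/
+ * txe handlers under udc->lock.
+ */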
+static irqreturn_t qe_udc_irq(int irq, void *_udc)
+{
+	struct qe_udc *udc = (struct qe_udc *)_udc;
+	u16 irq_src;
+	irqreturn_t status = IRQ_NONE;
+	unsigned long flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	irq_src = in_be16(&udc->usb_regs->usb_usber) &
+		in_be16(&udc->usb_regs->usb_usbmr);
+	/* Clear notification bits */
+	out_be16(&udc->usb_regs->usb_usber, irq_src);
+	/* USB Interrupt */
+	if (irq_src & USB_E_IDLE_MASK) {
+		idle_irq(udc);
+		irq_src &= ~USB_E_IDLE_MASK;
+		status = IRQ_HANDLED;
+	}
+
+	if (irq_src & USB_E_TXB_MASK) {
+		tx_irq(udc);
+		irq_src &= ~USB_E_TXB_MASK;
+		status = IRQ_HANDLED;
+	}
+
+	if (irq_src & USB_E_RXB_MASK) {
+		rx_irq(udc);
+		irq_src &= ~USB_E_RXB_MASK;
+		status = IRQ_HANDLED;
+	}
+
+	if (irq_src & USB_E_RESET_MASK) {
+		reset_irq(udc);
+		irq_src &= ~USB_E_RESET_MASK;
+		status = IRQ_HANDLED;
+	}
+
+	if (irq_src & USB_E_BSY_MASK) {
+		bsy_irq(udc);
+		irq_src &= ~USB_E_BSY_MASK;
+		status = IRQ_HANDLED;
+	}
+
+	if (irq_src & USB_E_TXE_MASK) {
+		txe_irq(udc);
+		irq_src &= ~USB_E_TXE_MASK;
+		status = IRQ_HANDLED;
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return status;
+}
+
+/*-------------------------------------------------------------------------
+	Gadget driver probe and unregister.
+ --------------------------------------------------------------------------*/
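+/*
+ * Register a gadget driver with the UDC: hook the driver up, call its
+ * bind() callback, then enable the controller and its default
+ * interrupt set.
+ */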
+static int fsl_qe_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	int retval;
+	unsigned long flags = 0;
+
+	/* standard operations */
+	if (!udc_controller)
+		return -ENODEV;
+
+	if (!driver || driver->max_speed < USB_SPEED_FULL
+			|| !bind || !driver->disconnect || !driver->setup)
+		return -EINVAL;
+
+	if (udc_controller->driver)
+		return -EBUSY;
+
+	/* a lock is needed, though it is unclear whether this or another lock should be used */
+	spin_lock_irqsave(&udc_controller->lock, flags);
+
+	driver->driver.bus = NULL;
+	/* hook up the driver */
+	udc_controller->driver = driver;
+	udc_controller->gadget.dev.driver = &driver->driver;
+	udc_controller->gadget.speed = driver->max_speed;
+	spin_unlock_irqrestore(&udc_controller->lock, flags);
+
+	retval = bind(&udc_controller->gadget);
+	if (retval) {
+		dev_err(udc_controller->dev, "bind to %s --> %d\n",
+				driver->driver.name, retval);
+		udc_controller->gadget.dev.driver = NULL;
+		udc_controller->driver = NULL;
+		return retval;
+	}
+
+	/* Enable IRQ reg and Set usbcmd reg EN bit */
+	qe_usb_enable();
+
+	out_be16(&udc_controller->usb_regs->usb_usber, 0xffff);
+	out_be16(&udc_controller->usb_regs->usb_usbmr, USB_E_DEFAULT_DEVICE);
+	udc_controller->usb_state = USB_STATE_ATTACHED;
+	udc_controller->ep0_state = WAIT_FOR_SETUP;
+	udc_controller->ep0_dir = USB_DIR_OUT;
+	dev_info(udc_controller->dev, "%s bound to driver %s\n",
+		udc_controller->gadget.name, driver->driver.name);
+	return 0;
+}
+
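+/*
+ * Unregister the gadget driver: stop the controller, complete all
+ * outstanding requests, report the disconnect and unbind the driver.
+ */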
+static int fsl_qe_stop(struct usb_gadget_driver *driver)
+{
+	struct qe_ep *loop_ep;
+	unsigned long flags;
+
+	if (!udc_controller)
+		return -ENODEV;
+
+	if (!driver || driver != udc_controller->driver)
+		return -EINVAL;
+
+	/* stop usb controller, disable intr */
+	qe_usb_disable();
+
+	/* in fact, not needed */
+	udc_controller->usb_state = USB_STATE_ATTACHED;
+	udc_controller->ep0_state = WAIT_FOR_SETUP;
+	udc_controller->ep0_dir = 0;
+
+	/* standard operation */
+	spin_lock_irqsave(&udc_controller->lock, flags);
+	udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
+	nuke(&udc_controller->eps[0], -ESHUTDOWN);
+	list_for_each_entry(loop_ep, &udc_controller->gadget.ep_list,
+				ep.ep_list)
+		nuke(loop_ep, -ESHUTDOWN);
+	spin_unlock_irqrestore(&udc_controller->lock, flags);
+
+	/* report disconnect; the controller is already quiesced */
+	driver->disconnect(&udc_controller->gadget);
+
+	/* unbind gadget and unhook driver. */
+	driver->unbind(&udc_controller->gadget);
+	udc_controller->gadget.dev.driver = NULL;
+	udc_controller->driver = NULL;
+
+	dev_info(udc_controller->dev, "unregistered gadget driver '%s'\n",
+			driver->driver.name);
+	return 0;
+}
+
+/* allocate and set up the udc structure, including the ep parameter RAM */
+static struct qe_udc __devinit *qe_udc_config(struct platform_device *ofdev)
+{
+	struct qe_udc *udc;
+	struct device_node *np = ofdev->dev.of_node;
+	unsigned int tmp_addr = 0;
+	struct usb_device_para __iomem *usbpram;
+	unsigned int i;
+	u64 size;
+	u32 offset;
+
+	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
+	if (udc == NULL) {
+		dev_err(&ofdev->dev, "failed to allocate the udc structure\n");
+		goto cleanup;
+	}
+
+	udc->dev = &ofdev->dev;
+
+	/* get default address of usb parameter in MURAM from device tree */
+	offset = *of_get_address(np, 1, &size, NULL);
+	udc->usb_param = cpm_muram_addr(offset);
+	memset_io(udc->usb_param, 0, size);
+
+	usbpram = udc->usb_param;
+	out_be16(&usbpram->frame_n, 0);
+	out_be32(&usbpram->rstate, 0);
+
+	tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
+					sizeof(struct usb_ep_para)),
+					   USB_EP_PARA_ALIGNMENT);
+	if (IS_ERR_VALUE(tmp_addr))
+		goto cleanup;
+
+	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
+		out_be16(&usbpram->epptr[i], (u16)tmp_addr);
+		udc->ep_param[i] = cpm_muram_addr(tmp_addr);
+		tmp_addr += 32;
+	}
+
+	memset_io(udc->ep_param[0], 0,
+			USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para));
+
+	udc->resume_state = USB_STATE_NOTATTACHED;
+	udc->usb_state = USB_STATE_POWERED;
+	udc->ep0_dir = 0;
+
+	spin_lock_init(&udc->lock);
+	return udc;
+
+cleanup:
+	kfree(udc);
+	return NULL;
+}
+
+/* USB Controller register init */
+static int __devinit qe_udc_reg_init(struct qe_udc *udc)
+{
+	struct usb_ctlr __iomem *qe_usbregs;
+	qe_usbregs = udc->usb_regs;
+
+	/* Spec says that we must enable the USB controller to change mode. */
+	out_8(&qe_usbregs->usb_usmod, 0x01);
+	/* Mode changed, now disable it, since muram isn't initialized yet. */
+	out_8(&qe_usbregs->usb_usmod, 0x00);
+
+	/* Initialize the rest. */
+	out_be16(&qe_usbregs->usb_usbmr, 0);
+	out_8(&qe_usbregs->usb_uscom, 0);
+	out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
+
+	return 0;
+}
+
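+/*
+ * Initialize the software state of one endpoint at probe time; the
+ * hardware endpoint itself is programmed later from qe_ep_enable() via
+ * qe_ep_init().
+ */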
+static int __devinit qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
+{
+	struct qe_ep *ep = &udc->eps[pipe_num];
+
+	ep->udc = udc;
+	strcpy(ep->name, ep_name[pipe_num]);
+	ep->ep.name = ep_name[pipe_num];
+
+	ep->ep.ops = &qe_ep_ops;
+	ep->stopped = 1;
+	ep->ep.maxpacket = (unsigned short) ~0;
+	ep->desc = NULL;
+	ep->dir = 0xff;
+	ep->epnum = (u8)pipe_num;
+	ep->sent = 0;
+	ep->last = 0;
+	ep->init = 0;
+	ep->rxframe = NULL;
+	ep->txframe = NULL;
+	ep->tx_req = NULL;
+	ep->state = EP_STATE_IDLE;
+	ep->has_data = 0;
+
+	/* the queue lists any req for this ep */
+	INIT_LIST_HEAD(&ep->queue);
+
+	/* gadget.ep_list is used for ep_autoconfig, so skip ep0 */
+	if (pipe_num != 0)
+		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+
+	ep->gadget = &udc->gadget;
+
+	return 0;
+}
+
+/*-----------------------------------------------------------------------
+ *	UDC device Driver operation functions				*
+ *----------------------------------------------------------------------*/
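+/*
+ * release() callback of gadget.dev: wake up qe_udc_remove(), then free
+ * the endpoint parameter MURAM and the udc structure.
+ */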
+static void qe_udc_release(struct device *dev)
+{
+	int i = 0;
+
+	complete(udc_controller->done);
+	cpm_muram_free(cpm_muram_offset(udc_controller->ep_param[0]));
+	for (i = 0; i < USB_MAX_ENDPOINTS; i++)
+		udc_controller->ep_param[i] = NULL;
+
+	kfree(udc_controller);
+	udc_controller = NULL;
+}
+
+/* Driver probe functions */
+static const struct of_device_id qe_udc_match[];
+static int __devinit qe_udc_probe(struct platform_device *ofdev)
+{
+	const struct of_device_id *match;
+	struct device_node *np = ofdev->dev.of_node;
+	struct qe_ep *ep;
+	unsigned int ret = 0;
+	unsigned int i;
+	const void *prop;
+
+	match = of_match_device(qe_udc_match, &ofdev->dev);
+	if (!match)
+		return -EINVAL;
+
+	prop = of_get_property(np, "mode", NULL);
+	if (!prop || strcmp(prop, "peripheral"))
+		return -ENODEV;
+
+	/* Initialize the udc structure including QH member and other member */
+	udc_controller = qe_udc_config(ofdev);
+	if (!udc_controller) {
+		dev_err(&ofdev->dev, "failed to initialize\n");
+		return -ENOMEM;
+	}
+
+	udc_controller->soc_type = (unsigned long)match->data;
+	udc_controller->usb_regs = of_iomap(np, 0);
+	if (!udc_controller->usb_regs) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	/* initialize usb hw regs except for the EP regs,
+	 * leave the usbintr reg untouched */
+	qe_udc_reg_init(udc_controller);
+
+	/* here come the standard operations for probe:
+	 * set the qe_udc->gadget.xxx fields */
+	udc_controller->gadget.ops = &qe_gadget_ops;
+
+	/* gadget.ep0 is a pointer */
+	udc_controller->gadget.ep0 = &udc_controller->eps[0].ep;
+
+	INIT_LIST_HEAD(&udc_controller->gadget.ep_list);
+
+	/* modify in register gadget process */
+	udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
+
+	/* name: Identifies the controller hardware type. */
+	udc_controller->gadget.name = driver_name;
+
+	device_initialize(&udc_controller->gadget.dev);
+
+	dev_set_name(&udc_controller->gadget.dev, "gadget");
+
+	udc_controller->gadget.dev.release = qe_udc_release;
+	udc_controller->gadget.dev.parent = &ofdev->dev;
+
+	/* initialize qe_ep struct */
+	for (i = 0; i < USB_MAX_ENDPOINTS ; i++) {
+		/* because the ep type isn't decided here,
+		 * qe_ep_init() should be called in ep_enable() */
+
+		/* setup the qe_ep struct and link ep.ep.list
+		 * into gadget.ep_list */
+		qe_ep_config(udc_controller, (unsigned char)i);
+	}
+
+	/* ep0 initialization in here */
+	ret = qe_ep_init(udc_controller, 0, &qe_ep0_desc);
+	if (ret)
+		goto err2;
+
+	/* create a buf for ZLP sends; it must remain zeroed */
+	udc_controller->nullbuf = kzalloc(256, GFP_KERNEL);
+	if (udc_controller->nullbuf == NULL) {
+		dev_err(udc_controller->dev, "cannot alloc nullbuf\n");
+		ret = -ENOMEM;
+		goto err3;
+	}
+
+	/* buffer for data of get_status request */
+	udc_controller->statusbuf = kzalloc(2, GFP_KERNEL);
+	if (udc_controller->statusbuf == NULL) {
+		ret = -ENOMEM;
+		goto err4;
+	}
+
+	udc_controller->nullp = virt_to_phys((void *)udc_controller->nullbuf);
+	if (udc_controller->nullp == DMA_ADDR_INVALID) {
+		udc_controller->nullp = dma_map_single(
+					udc_controller->gadget.dev.parent,
+					udc_controller->nullbuf,
+					256,
+					DMA_TO_DEVICE);
+		udc_controller->nullmap = 1;
+	} else {
+		dma_sync_single_for_device(udc_controller->gadget.dev.parent,
+					udc_controller->nullp, 256,
+					DMA_TO_DEVICE);
+	}
+
+	tasklet_init(&udc_controller->rx_tasklet, ep_rx_tasklet,
+			(unsigned long)udc_controller);
+	/* request irq and disable DR  */
+	udc_controller->usb_irq = irq_of_parse_and_map(np, 0);
+	if (!udc_controller->usb_irq) {
+		ret = -EINVAL;
+		goto err_noirq;
+	}
+
+	ret = request_irq(udc_controller->usb_irq, qe_udc_irq, 0,
+				driver_name, udc_controller);
+	if (ret) {
+		dev_err(udc_controller->dev, "cannot request irq %d err %d\n",
+			udc_controller->usb_irq, ret);
+		goto err5;
+	}
+
+	ret = device_add(&udc_controller->gadget.dev);
+	if (ret)
+		goto err6;
+
+	ret = usb_add_gadget_udc(&ofdev->dev, &udc_controller->gadget);
+	if (ret)
+		goto err7;
+
+	dev_info(udc_controller->dev,
+			"%s USB controller initialized as device\n",
+			(udc_controller->soc_type == PORT_QE) ? "QE" : "CPM");
+	return 0;
+
+err7:
+	device_unregister(&udc_controller->gadget.dev);
+err6:
+	free_irq(udc_controller->usb_irq, udc_controller);
+err5:
+	irq_dispose_mapping(udc_controller->usb_irq);
+err_noirq:
+	if (udc_controller->nullmap) {
+		dma_unmap_single(udc_controller->gadget.dev.parent,
+			udc_controller->nullp, 256,
+				DMA_TO_DEVICE);
+			udc_controller->nullp = DMA_ADDR_INVALID;
+	} else {
+		dma_sync_single_for_cpu(udc_controller->gadget.dev.parent,
+			udc_controller->nullp, 256,
+				DMA_TO_DEVICE);
+	}
+	kfree(udc_controller->statusbuf);
+err4:
+	kfree(udc_controller->nullbuf);
+err3:
+	ep = &udc_controller->eps[0];
+	cpm_muram_free(cpm_muram_offset(ep->rxbase));
+	kfree(ep->rxframe);
+	kfree(ep->rxbuffer);
+	kfree(ep->txframe);
+err2:
+	iounmap(udc_controller->usb_regs);
+err1:
+	kfree(udc_controller);
+	udc_controller = NULL;
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int qe_udc_suspend(struct platform_device *dev, pm_message_t state)
+{
+	return -ENOTSUPP;
+}
+
+static int qe_udc_resume(struct platform_device *dev)
+{
+	return -ENOTSUPP;
+}
+#endif
+
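+/*
+ * Undo everything set up in qe_udc_probe(), then wait for
+ * qe_udc_release() to free the udc structure.
+ */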
+static int __devexit qe_udc_remove(struct platform_device *ofdev)
+{
+	struct qe_ep *ep;
+	unsigned int size;
+
+	DECLARE_COMPLETION(done);
+
+	if (!udc_controller)
+		return -ENODEV;
+
+	usb_del_gadget_udc(&udc_controller->gadget);
+
+	udc_controller->done = &done;
+	tasklet_disable(&udc_controller->rx_tasklet);
+
+	if (udc_controller->nullmap) {
+		dma_unmap_single(udc_controller->gadget.dev.parent,
+			udc_controller->nullp, 256,
+				DMA_TO_DEVICE);
+			udc_controller->nullp = DMA_ADDR_INVALID;
+	} else {
+		dma_sync_single_for_cpu(udc_controller->gadget.dev.parent,
+			udc_controller->nullp, 256,
+				DMA_TO_DEVICE);
+	}
+	kfree(udc_controller->statusbuf);
+	kfree(udc_controller->nullbuf);
+
+	ep = &udc_controller->eps[0];
+	cpm_muram_free(cpm_muram_offset(ep->rxbase));
+	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1);
+
+	kfree(ep->rxframe);
+	if (ep->rxbufmap) {
+		dma_unmap_single(udc_controller->gadget.dev.parent,
+				ep->rxbuf_d, size,
+				DMA_FROM_DEVICE);
+		ep->rxbuf_d = DMA_ADDR_INVALID;
+	} else {
+		dma_sync_single_for_cpu(udc_controller->gadget.dev.parent,
+				ep->rxbuf_d, size,
+				DMA_FROM_DEVICE);
+	}
+
+	kfree(ep->rxbuffer);
+	kfree(ep->txframe);
+
+	free_irq(udc_controller->usb_irq, udc_controller);
+	irq_dispose_mapping(udc_controller->usb_irq);
+
+	tasklet_kill(&udc_controller->rx_tasklet);
+
+	iounmap(udc_controller->usb_regs);
+
+	device_unregister(&udc_controller->gadget.dev);
+	/* wait for release() of gadget.dev to free udc */
+	wait_for_completion(&done);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+static const struct of_device_id qe_udc_match[] __devinitconst = {
+	{
+		.compatible = "fsl,mpc8323-qe-usb",
+		.data = (void *)PORT_QE,
+	},
+	{
+		.compatible = "fsl,mpc8360-qe-usb",
+		.data = (void *)PORT_QE,
+	},
+	{
+		.compatible = "fsl,mpc8272-cpm-usb",
+		.data = (void *)PORT_CPM,
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, qe_udc_match);
+
+static struct platform_driver udc_driver = {
+	.driver = {
+		.name = (char *)driver_name,
+		.owner = THIS_MODULE,
+		.of_match_table = qe_udc_match,
+	},
+	.probe          = qe_udc_probe,
+	.remove         = __devexit_p(qe_udc_remove),
+#ifdef CONFIG_PM
+	.suspend        = qe_udc_suspend,
+	.resume         = qe_udc_resume,
+#endif
+};
+
+module_platform_driver(udc_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_qe_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_qe_udc.h
new file mode 100644
index 0000000..1da5fb0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_qe_udc.h
@@ -0,0 +1,422 @@
+/*
+ * drivers/usb/gadget/qe_udc.h
+ *
+ * Copyright (C) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * 	Xiaobo Xie <X.Xie@freescale.com>
+ * 	Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * Freescale USB device/endpoint management registers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef __FSL_QE_UDC_H
+#define __FSL_QE_UDC_H
+
+/* SoC type */
+#define PORT_CPM	0
+#define PORT_QE		1
+
+#define USB_MAX_ENDPOINTS               4
+#define USB_MAX_PIPES                   USB_MAX_ENDPOINTS
+#define USB_EP0_MAX_SIZE		64
+#define USB_MAX_CTRL_PAYLOAD            0x4000
+#define USB_BDRING_LEN			16
+#define USB_BDRING_LEN_RX		256
+#define USB_BDRING_LEN_TX		16
+#define MIN_EMPTY_BDS			128
+#define MAX_DATA_BDS			8
+#define USB_CRC_SIZE			2
+#define USB_DIR_BOTH			0x88
+#define R_BUF_MAXSIZE			0x800
+#define USB_EP_PARA_ALIGNMENT		32
+
+/* USB Mode Register bit define */
+#define USB_MODE_EN		0x01
+#define USB_MODE_HOST		0x02
+#define USB_MODE_TEST		0x04
+#define USB_MODE_SFTE		0x08
+#define USB_MODE_RESUME		0x40
+#define USB_MODE_LSS		0x80
+
+/* USB Slave Address Register Mask */
+#define USB_SLVADDR_MASK	0x7F
+
+/* USB Endpoint register define */
+#define USB_EPNUM_MASK		0xF000
+#define USB_EPNUM_SHIFT		12
+
+#define USB_TRANS_MODE_SHIFT	8
+#define USB_TRANS_CTR		0x0000
+#define USB_TRANS_INT		0x0100
+#define USB_TRANS_BULK		0x0200
+#define USB_TRANS_ISO		0x0300
+
+#define USB_EP_MF		0x0020
+#define USB_EP_RTE		0x0010
+
+#define USB_THS_SHIFT		2
+#define USB_THS_MASK		0x000c
+#define USB_THS_NORMAL		0x0
+#define USB_THS_IGNORE_IN	0x0004
+#define USB_THS_NACK		0x0008
+#define USB_THS_STALL		0x000c
+
+#define USB_RHS_SHIFT   	0
+#define USB_RHS_MASK		0x0003
+#define USB_RHS_NORMAL  	0x0
+#define USB_RHS_IGNORE_OUT	0x0001
+#define USB_RHS_NACK		0x0002
+#define USB_RHS_STALL		0x0003
+
+#define USB_RTHS_MASK		0x000f
+
+/* USB Command Register define */
+#define USB_CMD_STR_FIFO	0x80
+#define USB_CMD_FLUSH_FIFO	0x40
+#define USB_CMD_ISFT		0x20
+#define USB_CMD_DSFT		0x10
+#define USB_CMD_EP_MASK		0x03
+
+/* USB Event and Mask Register define */
+#define USB_E_MSF_MASK		0x0800
+#define USB_E_SFT_MASK		0x0400
+#define USB_E_RESET_MASK	0x0200
+#define USB_E_IDLE_MASK		0x0100
+#define USB_E_TXE4_MASK		0x0080
+#define USB_E_TXE3_MASK		0x0040
+#define USB_E_TXE2_MASK		0x0020
+#define USB_E_TXE1_MASK		0x0010
+#define USB_E_SOF_MASK		0x0008
+#define USB_E_BSY_MASK		0x0004
+#define USB_E_TXB_MASK		0x0002
+#define USB_E_RXB_MASK		0x0001
+#define USBER_ALL_CLEAR 	0x0fff
+
+#define USB_E_DEFAULT_DEVICE   (USB_E_RESET_MASK | USB_E_TXE4_MASK | \
+				USB_E_TXE3_MASK | USB_E_TXE2_MASK | \
+				USB_E_TXE1_MASK | USB_E_BSY_MASK | \
+				USB_E_TXB_MASK | USB_E_RXB_MASK)
+
+#define USB_E_TXE_MASK         (USB_E_TXE4_MASK | USB_E_TXE3_MASK|\
+				 USB_E_TXE2_MASK | USB_E_TXE1_MASK)
+/* USB Status Register define */
+#define USB_IDLE_STATUS_MASK	0x01
+
+/* USB Start of Frame Timer */
+#define USB_USSFT_MASK		0x3FFF
+
+/* USB Frame Number Register */
+#define USB_USFRN_MASK		0xFFFF
+
+struct usb_device_para{
+	u16	epptr[4];
+	u32	rstate;
+	u32	rptr;
+	u16	frame_n;
+	u16	rbcnt;
+	u32	rtemp;
+	u32	rxusb_data;
+	u16	rxuptr;
+	u8	reso[2];
+	u32	softbl;
+	u8	sofucrctemp;
+};
+
+struct usb_ep_para{
+	u16	rbase;
+	u16	tbase;
+	u8	rbmr;
+	u8	tbmr;
+	u16	mrblr;
+	u16	rbptr;
+	u16	tbptr;
+	u32	tstate;
+	u32	tptr;
+	u16	tcrc;
+	u16	tbcnt;
+	u32	ttemp;
+	u16	txusbu_ptr;
+	u8	reserve[2];
+};
+
+#define USB_BUSMODE_GBL		0x20
+#define USB_BUSMODE_BO_MASK	0x18
+#define USB_BUSMODE_BO_SHIFT	0x3
+#define USB_BUSMODE_BE		0x2
+#define USB_BUSMODE_CETM	0x04
+#define USB_BUSMODE_DTB		0x02
+
+/* Endpoint basic handle */
+#define ep_index(EP)		((EP)->desc->bEndpointAddress & 0xF)
+#define ep_maxpacket(EP)	((EP)->ep.maxpacket)
+#define ep_is_in(EP)	((ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
+			USB_DIR_IN) : ((EP)->desc->bEndpointAddress \
+			& USB_DIR_IN) == USB_DIR_IN)
+
+/* ep0 transfer state */
+#define WAIT_FOR_SETUP          0
+#define DATA_STATE_XMIT         1
+#define DATA_STATE_NEED_ZLP     2
+#define WAIT_FOR_OUT_STATUS     3
+#define DATA_STATE_RECV         4
+
+/* ep transfer mode */
+#define USBP_TM_CTL	0
+#define USBP_TM_ISO	1
+#define USBP_TM_BULK	2
+#define USBP_TM_INT	3
+
+/*-----------------------------------------------------------------------------
+	USB RX And TX DATA Frame
+ -----------------------------------------------------------------------------*/
+struct qe_frame{
+	u8 *data;
+	u32 len;
+	u32 status;
+	u32 info;
+
+	void *privdata;
+	struct list_head node;
+};
+
+/* Frame structure, info field. */
+#define PID_DATA0              0x80000000 /* Data toggle zero */
+#define PID_DATA1              0x40000000 /* Data toggle one  */
+#define PID_SETUP              0x20000000 /* setup bit */
+#define SETUP_STATUS           0x10000000 /* setup status bit */
+#define SETADDR_STATUS         0x08000000 /* setup address status bit */
+#define NO_REQ                 0x04000000 /* Frame without request */
+#define HOST_DATA              0x02000000 /* Host data frame */
+#define FIRST_PACKET_IN_FRAME  0x01000000 /* first packet in the frame */
+#define TOKEN_FRAME            0x00800000 /* Host token frame */
+#define ZLP                    0x00400000 /* Zero length packet */
+#define IN_TOKEN_FRAME         0x00200000 /* In token packet */
+#define OUT_TOKEN_FRAME        0x00100000 /* Out token packet */
+#define SETUP_TOKEN_FRAME      0x00080000 /* Setup token packet */
+#define STALL_FRAME            0x00040000 /* Stall handshake */
+#define NACK_FRAME             0x00020000 /* Nack handshake */
+#define NO_PID                 0x00010000 /* No send PID */
+#define NO_CRC                 0x00008000 /* No send CRC */
+#define HOST_COMMAND           0x00004000 /* Host command frame   */
+
+/* Frame status field */
+/* Receive side */
+#define FRAME_OK               0x00000000 /* Frame transmitted or received OK */
+#define FRAME_ERROR            0x80000000 /* Error occurred on frame */
+#define START_FRAME_LOST       0x40000000 /* START_FRAME_LOST */
+#define END_FRAME_LOST         0x20000000 /* END_FRAME_LOST */
+#define RX_ER_NONOCT           0x10000000 /* Rx Non Octet Aligned Packet */
+#define RX_ER_BITSTUFF         0x08000000 /* Frame Aborted --Received packet
+					     with bit stuff error */
+#define RX_ER_CRC              0x04000000 /* Received packet with CRC error */
+#define RX_ER_OVERUN           0x02000000 /* Over-run occurred on reception */
+#define RX_ER_PID              0x01000000 /* Wrong PID received */
+/* Transmit side */
+#define TX_ER_NAK              0x00800000 /* Received NAK handshake */
+#define TX_ER_STALL            0x00400000 /* Received STALL handshake */
+#define TX_ER_TIMEOUT          0x00200000 /* Transmit time out */
+#define TX_ER_UNDERUN          0x00100000 /* Transmit underrun */
+#define FRAME_INPROGRESS       0x00080000 /* Frame is being transmitted */
+#define ER_DATA_UNDERUN        0x00040000 /* Frame is shorter than expected */
+#define ER_DATA_OVERUN         0x00020000 /* Frame is longer than expected */
+
+/* QE USB frame operation functions */
+#define frame_get_length(frm) (frm->len)
+#define frame_set_length(frm, leng) (frm->len = leng)
+#define frame_get_data(frm) (frm->data)
+#define frame_set_data(frm, dat) (frm->data = dat)
+#define frame_get_info(frm) (frm->info)
+#define frame_set_info(frm, inf) (frm->info = inf)
+#define frame_get_status(frm) (frm->status)
+#define frame_set_status(frm, stat) (frm->status = stat)
+#define frame_get_privdata(frm) (frm->privdata)
+#define frame_set_privdata(frm, dat) (frm->privdata = dat)
+
+static inline void qe_frame_clean(struct qe_frame *frm)
+{
+	frame_set_data(frm, NULL);
+	frame_set_length(frm, 0);
+	frame_set_status(frm, FRAME_OK);
+	frame_set_info(frm, 0);
+	frame_set_privdata(frm, NULL);
+}
+
+static inline void qe_frame_init(struct qe_frame *frm)
+{
+	qe_frame_clean(frm);
+	INIT_LIST_HEAD(&(frm->node));
+}
+
+struct qe_req {
+	struct usb_request req;
+	struct list_head queue;
+	/* ep_queue() adds a request's queue entry
+	 * to the tail of the owning qe_ep's queue */
+	struct qe_ep *ep;
+	unsigned mapped:1;
+};
+
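+/*
+ * Per-endpoint state: BD rings, rx/tx frames, data toggle and the
+ * request queue.
+ */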
+struct qe_ep {
+	struct usb_ep ep;
+	struct list_head queue;
+	struct qe_udc *udc;
+	const struct usb_endpoint_descriptor *desc;
+	struct usb_gadget *gadget;
+
+	u8 state;
+
+	struct qe_bd __iomem *rxbase;
+	struct qe_bd __iomem *n_rxbd;
+	struct qe_bd __iomem *e_rxbd;
+
+	struct qe_bd __iomem *txbase;
+	struct qe_bd __iomem *n_txbd;
+	struct qe_bd __iomem *c_txbd;
+
+	struct qe_frame *rxframe;
+	u8 *rxbuffer;
+	dma_addr_t rxbuf_d;
+	u8 rxbufmap;
+	unsigned char localnack;
+	int has_data;
+
+	struct qe_frame *txframe;
+	struct qe_req *tx_req;
+	int sent;  /* data already sent */
+	int last;  /* data sent last time */
+
+	u8 dir;
+	u8 epnum;
+	u8 tm; /* transfer mode */
+	u8 data01;
+	u8 init;
+
+	u8 already_seen;
+	u8 enable_tasklet;
+	u8 setup_stage;
+	u32 last_io;            /* timestamp */
+
+	char name[14];
+
+	unsigned double_buf:1;
+	unsigned stopped:1;
+	unsigned fnf:1;
+	unsigned has_dma:1;
+
+	u8 ackwait;
+	u8 dma_channel;
+	u16 dma_counter;
+	int lch;
+
+	struct timer_list timer;
+};
+
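+/*
+ * Per-controller state: gadget bookkeeping, the endpoint array, the
+ * USB/EP parameter RAM pointers and ep0 state.
+ */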
+struct qe_udc {
+	struct usb_gadget gadget;
+	struct usb_gadget_driver *driver;
+	struct device *dev;
+	struct qe_ep eps[USB_MAX_ENDPOINTS];
+	struct usb_ctrlrequest local_setup_buff;
+	spinlock_t lock;	/* lock for set/config qe_udc */
+	unsigned long soc_type;		/* QE or CPM soc */
+
+	struct qe_req *status_req;	/* ep0 status request */
+
+	/* USB and EP Parameter Block pointer */
+	struct usb_device_para __iomem *usb_param;
+	struct usb_ep_para __iomem *ep_param[4];
+
+	u32 max_pipes;          /* Device max pipes */
+	u32 max_use_endpts;     /* Max endpoints to be used */
+	u32 bus_reset;          /* Device is bus resetting */
+	u32 resume_state;       /* USB state to resume */
+	u32 usb_state;          /* USB current state */
+	u32 usb_next_state;     /* USB next state */
+	u32 ep0_state;          /* Endpoint zero state */
+	u32 ep0_dir;            /* Endpoint zero direction: can be
+				USB_DIR_IN or USB_DIR_OUT */
+	u32 usb_sof_count;      /* SOF count */
+	u32 errors;             /* USB ERRORs count */
+
+	u8 *tmpbuf;
+	u32 c_start;
+	u32 c_end;
+
+	u8 *nullbuf;
+	u8 *statusbuf;
+	dma_addr_t nullp;
+	u8 nullmap;
+	u8 device_address;	/* Device USB address */
+
+	unsigned int usb_clock;
+	unsigned int usb_irq;
+	struct usb_ctlr __iomem *usb_regs;
+
+	struct tasklet_struct rx_tasklet;
+
+	struct completion *done;	/* to make sure release() is done */
+};
+
+#define EP_STATE_IDLE	0
+#define EP_STATE_NACK	1
+#define EP_STATE_STALL	2
+
+/*
+ * transmit BD's status
+ */
+#define T_R           0x80000000         /* ready bit */
+#define T_W           0x20000000         /* wrap bit */
+#define T_I           0x10000000         /* interrupt on completion */
+#define T_L           0x08000000         /* last */
+#define T_TC          0x04000000         /* transmit CRC */
+#define T_CNF         0x02000000         /* wait for  transmit confirm */
+#define T_LSP         0x01000000         /* Low-speed transaction */
+#define T_PID         0x00c00000         /* packet id */
+#define T_NAK         0x00100000         /* No ack. */
+#define T_STAL        0x00080000         /* Stall received */
+#define T_TO          0x00040000         /* time out */
+#define T_UN          0x00020000         /* underrun */
+
+#define DEVICE_T_ERROR    (T_UN | T_TO)
+#define HOST_T_ERROR      (T_UN | T_TO | T_NAK | T_STAL)
+#define DEVICE_T_BD_MASK  DEVICE_T_ERROR
+#define HOST_T_BD_MASK    HOST_T_ERROR
+
+#define T_PID_SHIFT   6
+#define T_PID_DATA0   0x00800000         /* Data 0 toggle */
+#define T_PID_DATA1   0x00c00000         /* Data 1 toggle */
+
+/*
+ * receive BD's status
+ */
+#define R_E           0x80000000         /* buffer empty */
+#define R_W           0x20000000         /* wrap bit */
+#define R_I           0x10000000         /* interrupt on reception */
+#define R_L           0x08000000         /* last */
+#define R_F           0x04000000         /* first */
+#define R_PID         0x00c00000         /* packet id */
+#define R_NO          0x00100000         /* Rx Non Octet Aligned Packet */
+#define R_AB          0x00080000         /* Frame Aborted */
+#define R_CR          0x00040000         /* CRC Error */
+#define R_OV          0x00020000         /* Overrun */
+
+#define R_ERROR       (R_NO | R_AB | R_CR | R_OV)
+#define R_BD_MASK     R_ERROR
+
+#define R_PID_DATA0   0x00000000
+#define R_PID_DATA1   0x00400000
+#define R_PID_SETUP   0x00800000
+
+#define CPM_USB_STOP_TX 0x2e600000
+#define CPM_USB_RESTART_TX 0x2e600000
+#define CPM_USB_STOP_TX_OPCODE 0x0a
+#define CPM_USB_RESTART_TX_OPCODE 0x0b
+#define CPM_USB_EP_SHIFT 5
+
+#endif  /* __FSL_QE_UDC_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_udc_core.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_udc_core.c
new file mode 100644
index 0000000..188a89f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_udc_core.c
@@ -0,0 +1,2778 @@
+/*
+ * Copyright (C) 2004-2007,2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ *         Jiang Bo <tanya.jiang@freescale.com>
+ *
+ * Description:
+ * Freescale high-speed USB SOC DR module device controller driver.
+ * This can be found on MPC8349E/MPC8313E/MPC5121E cpus.
+ * The driver was previously named mpc_udc.  Based on bare board
+ * code from Dave Liu and Shlomi Gridish.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#undef VERBOSE
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+#include <asm/dma.h>
+
+#include "fsl_usb2_udc.h"
+
+#define	DRIVER_DESC	"Freescale High-Speed USB SOC Device Controller driver"
+#define	DRIVER_AUTHOR	"Li Yang/Jiang Bo"
+#define	DRIVER_VERSION	"Apr 20, 2007"
+
+#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+static const char driver_name[] = "fsl-usb2-udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+static struct usb_dr_device *dr_regs;
+#ifndef CONFIG_ARCH_MXC
+static struct usb_sys_interface *usb_sys_regs;
+#endif
+
+/* it is initialized in probe()  */
+static struct fsl_udc *udc_controller = NULL;
+
+static const struct usb_endpoint_descriptor
+fsl_ep0_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	0,
+	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize =	USB_MAX_CTRL_PAYLOAD,
+};
+
+static void fsl_ep_fifo_flush(struct usb_ep *_ep);
+
+#ifdef CONFIG_PPC32
+/*
+ * On some SoCs, the USB controller registers can be big or little endian,
+ * depending on the version of the chip. In order to be able to run the
+ * same kernel binary on 2 different versions of an SoC, the BE/LE decision
+ * must be made at run time. _fsl_readl and fsl_writel are pointers to the
+ * BE or LE readl() and writel() functions, and fsl_readl() and fsl_writel()
+ * call through those pointers. Platform code for SoCs that have BE USB
+ * registers should set pdata->big_endian_mmio flag.
+ *
+ * This also applies to controller-to-cpu accessors for the USB descriptors,
+ * since their endianness is also SoC dependent. Platform code for SoCs that
+ * have BE USB descriptors should set pdata->big_endian_desc flag.
+ */
+static u32 _fsl_readl_be(const unsigned __iomem *p)
+{
+	return in_be32(p);
+}
+
+static u32 _fsl_readl_le(const unsigned __iomem *p)
+{
+	return in_le32(p);
+}
+
+static void _fsl_writel_be(u32 v, unsigned __iomem *p)
+{
+	out_be32(p, v);
+}
+
+static void _fsl_writel_le(u32 v, unsigned __iomem *p)
+{
+	out_le32(p, v);
+}
+
+static u32 (*_fsl_readl)(const unsigned __iomem *p);
+static void (*_fsl_writel)(u32 v, unsigned __iomem *p);
+
+#define fsl_readl(p)		(*_fsl_readl)((p))
+#define fsl_writel(v, p)	(*_fsl_writel)((v), (p))
+
+static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata)
+{
+	if (pdata->big_endian_mmio) {
+		_fsl_readl = _fsl_readl_be;
+		_fsl_writel = _fsl_writel_be;
+	} else {
+		_fsl_readl = _fsl_readl_le;
+		_fsl_writel = _fsl_writel_le;
+	}
+}
+
+static inline u32 cpu_to_hc32(const u32 x)
+{
+	return udc_controller->pdata->big_endian_desc
+		? (__force u32)cpu_to_be32(x)
+		: (__force u32)cpu_to_le32(x);
+}
+
+static inline u32 hc32_to_cpu(const u32 x)
+{
+	return udc_controller->pdata->big_endian_desc
+		? be32_to_cpu((__force __be32)x)
+		: le32_to_cpu((__force __le32)x);
+}
+#else /* !CONFIG_PPC32 */
+static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata) {}
+
+#define fsl_readl(addr)		readl(addr)
+#define fsl_writel(val32, addr) writel(val32, addr)
+#define cpu_to_hc32(x)		cpu_to_le32(x)
+#define hc32_to_cpu(x)		le32_to_cpu(x)
+#endif /* CONFIG_PPC32 */
+
+/********************************************************************
+ *	Internally Used Functions
+********************************************************************/
+/*-----------------------------------------------------------------
+ * done() - retire a request; caller blocked irqs
+ * @status : request status to be set, only works when
+ *	request is still in progress.
+ *--------------------------------------------------------------*/
+static void done(struct fsl_ep *ep, struct fsl_req *req, int status)
+{
+	struct fsl_udc *udc = NULL;
+	unsigned char stopped = ep->stopped;
+	struct ep_td_struct *curr_td, *next_td;
+	int j;
+
+	udc = (struct fsl_udc *)ep->udc;
+	/* Remove the req from fsl_ep->queue */
+	list_del_init(&req->queue);
+
+	/* req.status should be set as -EINPROGRESS in ep_queue() */
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	/* Free dtd for the request */
+	next_td = req->head;
+	for (j = 0; j < req->dtd_count; j++) {
+		curr_td = next_td;
+		if (j != req->dtd_count - 1) {
+			next_td = curr_td->next_td_virt;
+		}
+		dma_pool_free(udc->td_pool, curr_td, curr_td->td_dma);
+	}
+
+	if (req->mapped) {
+		dma_unmap_single(ep->udc->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			ep_is_in(ep)
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+	} else
+		dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			ep_is_in(ep)
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+
+	if (status && (status != -ESHUTDOWN))
+		VDBG("complete %s req %p stat %d len %u/%u",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	ep->stopped = 1;
+
+	spin_unlock(&ep->udc->lock);
+	/* complete() is from gadget layer,
+	 * eg fsg->bulk_in_complete() */
+	if (req->req.complete)
+		req->req.complete(&ep->ep, &req->req);
+
+	spin_lock(&ep->udc->lock);
+	ep->stopped = stopped;
+}
+
+/*-----------------------------------------------------------------
+ * nuke(): delete all requests related to this ep
+ * called with spinlock held
+ *--------------------------------------------------------------*/
+static void nuke(struct fsl_ep *ep, int status)
+{
+	ep->stopped = 1;
+
+	/* Flush fifo */
+	fsl_ep_fifo_flush(&ep->ep);
+
+	/* Complete every request still linked to this ep */
+	while (!list_empty(&ep->queue)) {
+		struct fsl_req *req = NULL;
+
+		req = list_entry(ep->queue.next, struct fsl_req, queue);
+		done(ep, req, status);
+	}
+}
+
+/*------------------------------------------------------------------
+	Internal Hardware related function
+ ------------------------------------------------------------------*/
+
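+/*
+ * One-time controller setup: select the PHY interface, reset the
+ * controller, switch it to device mode, program the endpoint list
+ * address and default the non-control endpoints to bulk.
+ */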
+static int dr_controller_setup(struct fsl_udc *udc)
+{
+	unsigned int tmp, portctrl, ep_num;
+	unsigned int max_no_of_ep;
+#ifndef CONFIG_ARCH_MXC
+	unsigned int ctrl;
+#endif
+	unsigned long timeout;
+#define FSL_UDC_RESET_TIMEOUT 1000
+
+	/* Config PHY interface */
+	portctrl = fsl_readl(&dr_regs->portsc1);
+	portctrl &= ~(PORTSCX_PHY_TYPE_SEL | PORTSCX_PORT_WIDTH);
+	switch (udc->phy_mode) {
+	case FSL_USB2_PHY_ULPI:
+		portctrl |= PORTSCX_PTS_ULPI;
+		break;
+	case FSL_USB2_PHY_UTMI_WIDE:
+		portctrl |= PORTSCX_PTW_16BIT;
+		/* fall through */
+	case FSL_USB2_PHY_UTMI:
+		portctrl |= PORTSCX_PTS_UTMI;
+		break;
+	case FSL_USB2_PHY_SERIAL:
+		portctrl |= PORTSCX_PTS_FSLS;
+		break;
+	default:
+		return -EINVAL;
+	}
+	fsl_writel(portctrl, &dr_regs->portsc1);
+
+	/* Stop and reset the usb controller */
+	tmp = fsl_readl(&dr_regs->usbcmd);
+	tmp &= ~USB_CMD_RUN_STOP;
+	fsl_writel(tmp, &dr_regs->usbcmd);
+
+	tmp = fsl_readl(&dr_regs->usbcmd);
+	tmp |= USB_CMD_CTRL_RESET;
+	fsl_writel(tmp, &dr_regs->usbcmd);
+
+	/* Wait for reset to complete */
+	timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
+	while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
+		if (time_after(jiffies, timeout)) {
+			ERR("udc reset timeout!\n");
+			return -ETIMEDOUT;
+		}
+		cpu_relax();
+	}
+
+	/* Set the controller as device mode */
+	tmp = fsl_readl(&dr_regs->usbmode);
+	tmp &= ~USB_MODE_CTRL_MODE_MASK;	/* clear mode bits */
+	tmp |= USB_MODE_CTRL_MODE_DEVICE;
+	/* Disable Setup Lockout */
+	tmp |= USB_MODE_SETUP_LOCK_OFF;
+	if (udc->pdata->es)
+		tmp |= USB_MODE_ES;
+	fsl_writel(tmp, &dr_regs->usbmode);
+
+	/* Clear the setup status */
+	fsl_writel(0, &dr_regs->usbsts);
+
+	tmp = udc->ep_qh_dma;
+	tmp &= USB_EP_LIST_ADDRESS_MASK;
+	fsl_writel(tmp, &dr_regs->endpointlistaddr);
+
+	VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x",
+		udc->ep_qh, (int)tmp,
+		fsl_readl(&dr_regs->endpointlistaddr));
+
+	max_no_of_ep = (0x0000001F & fsl_readl(&dr_regs->dccparams));
+	for (ep_num = 1; ep_num < max_no_of_ep; ep_num++) {
+		tmp = fsl_readl(&dr_regs->endptctrl[ep_num]);
+		tmp &= ~(EPCTRL_TX_TYPE | EPCTRL_RX_TYPE);
+		tmp |= (EPCTRL_EP_TYPE_BULK << EPCTRL_TX_EP_TYPE_SHIFT)
+		| (EPCTRL_EP_TYPE_BULK << EPCTRL_RX_EP_TYPE_SHIFT);
+		fsl_writel(tmp, &dr_regs->endptctrl[ep_num]);
+	}
+	/* Config control enable i/o output, cpu endian register */
+#ifndef CONFIG_ARCH_MXC
+	if (udc->pdata->have_sysif_regs) {
+		ctrl = __raw_readl(&usb_sys_regs->control);
+		ctrl |= USB_CTRL_IOENB;
+		__raw_writel(ctrl, &usb_sys_regs->control);
+	}
+#endif
+
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+	/* Turn on cache snooping hardware, since some PowerPC platforms
+	 * rely entirely on hardware to maintain cache coherency. */
+
+	if (udc->pdata->have_sysif_regs) {
+		/* Setup Snooping for all the 4GB space */
+		tmp = SNOOP_SIZE_2GB;	/* starts from 0x0, size 2G */
+		__raw_writel(tmp, &usb_sys_regs->snoop1);
+		tmp |= 0x80000000;	/* starts from 0x80000000, size 2G */
+		__raw_writel(tmp, &usb_sys_regs->snoop2);
+	}
+#endif
+
+	return 0;
+}
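+
+/*
+ * Note: an illustrative bring-up order (mirrored by the controller-reset
+ * path in reset_irq() below) is dr_controller_setup() -> ep0_setup() ->
+ * dr_controller_run(); only dr_controller_run() unmasks interrupts and
+ * sets the Run/Stop bit.
+ */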
+
+/* Enable DR irq and set controller to run state */
+static void dr_controller_run(struct fsl_udc *udc)
+{
+	u32 temp;
+
+	/* Enable DR irq reg */
+	temp = USB_INTR_INT_EN | USB_INTR_ERR_INT_EN
+		| USB_INTR_PTC_DETECT_EN | USB_INTR_RESET_EN
+		| USB_INTR_DEVICE_SUSPEND | USB_INTR_SYS_ERR_EN;
+
+	fsl_writel(temp, &dr_regs->usbintr);
+
+	/* Clear stopped bit */
+	udc->stopped = 0;
+
+	/* Set the controller as device mode */
+	temp = fsl_readl(&dr_regs->usbmode);
+	temp |= USB_MODE_CTRL_MODE_DEVICE;
+	fsl_writel(temp, &dr_regs->usbmode);
+
+	/* Set controller to Run */
+	temp = fsl_readl(&dr_regs->usbcmd);
+	temp |= USB_CMD_RUN_STOP;
+	fsl_writel(temp, &dr_regs->usbcmd);
+}
+
+static void dr_controller_stop(struct fsl_udc *udc)
+{
+	unsigned int tmp;
+
+	pr_debug("%s\n", __func__);
+
+	/* if we're in OTG mode, and the Host is currently using the port,
+	 * stop now and don't rip the controller out from under the
+	 * ehci driver
+	 */
+	if (udc->gadget.is_otg) {
+		if (!(fsl_readl(&dr_regs->otgsc) & OTGSC_STS_USB_ID)) {
+			pr_debug("udc: Leaving early\n");
+			return;
+		}
+	}
+
+	/* disable all INTR */
+	fsl_writel(0, &dr_regs->usbintr);
+
+	/* Set stopped bit for isr */
+	udc->stopped = 1;
+
+	/* disable IO output */
+/*	usb_sys_regs->control = 0; */
+
+	/* set controller to Stop */
+	tmp = fsl_readl(&dr_regs->usbcmd);
+	tmp &= ~USB_CMD_RUN_STOP;
+	fsl_writel(tmp, &dr_regs->usbcmd);
+}
+
+static void dr_ep_setup(unsigned char ep_num, unsigned char dir,
+			unsigned char ep_type)
+{
+	unsigned int tmp_epctrl = 0;
+
+	tmp_epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+	if (dir) {
+		if (ep_num)
+			tmp_epctrl |= EPCTRL_TX_DATA_TOGGLE_RST;
+		tmp_epctrl |= EPCTRL_TX_ENABLE;
+		tmp_epctrl &= ~EPCTRL_TX_TYPE;
+		tmp_epctrl |= ((unsigned int)(ep_type)
+				<< EPCTRL_TX_EP_TYPE_SHIFT);
+	} else {
+		if (ep_num)
+			tmp_epctrl |= EPCTRL_RX_DATA_TOGGLE_RST;
+		tmp_epctrl |= EPCTRL_RX_ENABLE;
+		tmp_epctrl &= ~EPCTRL_RX_TYPE;
+		tmp_epctrl |= ((unsigned int)(ep_type)
+				<< EPCTRL_RX_EP_TYPE_SHIFT);
+	}
+
+	fsl_writel(tmp_epctrl, &dr_regs->endptctrl[ep_num]);
+}
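+
+/*
+ * Layout note (sketch): each ENDPTCTRLn register carries the RX (OUT)
+ * control bits in its lower half and the TX (IN) control bits in its upper
+ * half, so the helpers above and below only touch one half depending on
+ * @dir.  An illustrative call,
+ *
+ *	dr_ep_setup(2, USB_SEND, USB_ENDPOINT_XFER_BULK);
+ *
+ * would enable ep2-IN as a bulk endpoint and reset its data toggle.
+ */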
+
+static void
+dr_ep_change_stall(unsigned char ep_num, unsigned char dir, int value)
+{
+	u32 tmp_epctrl = 0;
+
+	tmp_epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+
+	if (value) {
+		/* set the stall bit */
+		if (dir)
+			tmp_epctrl |= EPCTRL_TX_EP_STALL;
+		else
+			tmp_epctrl |= EPCTRL_RX_EP_STALL;
+	} else {
+		/* clear the stall bit and reset data toggle */
+		if (dir) {
+			tmp_epctrl &= ~EPCTRL_TX_EP_STALL;
+			tmp_epctrl |= EPCTRL_TX_DATA_TOGGLE_RST;
+		} else {
+			tmp_epctrl &= ~EPCTRL_RX_EP_STALL;
+			tmp_epctrl |= EPCTRL_RX_DATA_TOGGLE_RST;
+		}
+	}
+	fsl_writel(tmp_epctrl, &dr_regs->endptctrl[ep_num]);
+}
+
+/* Get the stall status of a specific ep
+   Return: 0 - not stalled; 1 - stalled */
+static int dr_ep_get_stall(unsigned char ep_num, unsigned char dir)
+{
+	u32 epctrl;
+
+	epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+	if (dir)
+		return (epctrl & EPCTRL_TX_EP_STALL) ? 1 : 0;
+	else
+		return (epctrl & EPCTRL_RX_EP_STALL) ? 1 : 0;
+}
+
+/********************************************************************
+	Internal Structure Build up functions
+********************************************************************/
+
+/*------------------------------------------------------------------
+ * struct_ep_qh_setup(): set the Endpoint Capabilities field of QH
+ * @zlt: Zero Length Termination Select (1: disable; 0: enable)
+ * @mult: Mult field
+ ------------------------------------------------------------------*/
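+/*
+ * Layout note: the queue heads live in one contiguous array indexed as
+ * ep_qh[2 * ep_num + dir], with the OUT (USB_RECV) entry first and the IN
+ * (USB_SEND) entry second for each endpoint number, so ep0 uses ep_qh[0]
+ * and ep_qh[1].
+ */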
+static void struct_ep_qh_setup(struct fsl_udc *udc, unsigned char ep_num,
+		unsigned char dir, unsigned char ep_type,
+		unsigned int max_pkt_len,
+		unsigned int zlt, unsigned char mult)
+{
+	struct ep_queue_head *p_QH = &udc->ep_qh[2 * ep_num + dir];
+	unsigned int tmp = 0;
+
+	/* set the Endpoint Capabilities in QH */
+	switch (ep_type) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		/* Interrupt On Setup (IOS), for the control ep */
+		tmp = (max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+			| EP_QUEUE_HEAD_IOS;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		tmp = (max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+			| (mult << EP_QUEUE_HEAD_MULT_POS);
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+	case USB_ENDPOINT_XFER_INT:
+		tmp = max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS;
+		break;
+	default:
+		VDBG("error ep type is %d", ep_type);
+		return;
+	}
+	if (zlt)
+		tmp |= EP_QUEUE_HEAD_ZLT_SEL;
+
+	p_QH->max_pkt_length = cpu_to_hc32(tmp);
+	p_QH->next_dtd_ptr = 1;
+	p_QH->size_ioc_int_sts = 0;
+}
+
+/* Setup qh structure and ep register for ep0. */
+static void ep0_setup(struct fsl_udc *udc)
+{
+	/* the initialization of an ep includes: fields in the QH, registers,
+	 * and the fsl_ep struct */
+	struct_ep_qh_setup(udc, 0, USB_RECV, USB_ENDPOINT_XFER_CONTROL,
+			USB_MAX_CTRL_PAYLOAD, 0, 0);
+	struct_ep_qh_setup(udc, 0, USB_SEND, USB_ENDPOINT_XFER_CONTROL,
+			USB_MAX_CTRL_PAYLOAD, 0, 0);
+	dr_ep_setup(0, USB_RECV, USB_ENDPOINT_XFER_CONTROL);
+	dr_ep_setup(0, USB_SEND, USB_ENDPOINT_XFER_CONTROL);
+}
+
+/***********************************************************************
+		Endpoint Management Functions
+***********************************************************************/
+
+/*-------------------------------------------------------------------------
+ * When a configuration is set, or when interface settings change (for
+ * example via do_set_interface() in the gadget layer), the driver will
+ * enable or disable the relevant endpoints.
+ * ep0 doesn't use this routine; it is always enabled.
+-------------------------------------------------------------------------*/
+static int fsl_ep_enable(struct usb_ep *_ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct fsl_udc *udc = NULL;
+	struct fsl_ep *ep = NULL;
+	unsigned short max = 0;
+	unsigned char mult = 0, zlt;
+	int retval = -EINVAL;
+	unsigned long flags = 0;
+
+	ep = container_of(_ep, struct fsl_ep, ep);
+
+	/* catch various bogus parameters */
+	if (!_ep || !desc || ep->desc
+			|| (desc->bDescriptorType != USB_DT_ENDPOINT))
+		return -EINVAL;
+
+	udc = ep->udc;
+
+	if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+
+	max = usb_endpoint_maxp(desc);
+
+	/* Disable automatic zlp generation.  The driver is responsible for
+	 * indicating zlps explicitly through req->req.zero.  This is needed
+	 * to enable multi-dTD requests. */
+	zlt = 1;
+
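+	/*
+	 * For high-speed high-bandwidth isochronous endpoints, wMaxPacketSize
+	 * packs the packet size in bits 10:0 and the number of additional
+	 * transactions per microframe in bits 12:11.  E.g. (illustrative) a
+	 * wMaxPacketSize of 0x1400 decodes below to max = 1024 and mult = 3.
+	 */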
+	/* Assume the max packet size from gadget is always correct */
+	switch (desc->bmAttributes & 0x03) {
+	case USB_ENDPOINT_XFER_CONTROL:
+	case USB_ENDPOINT_XFER_BULK:
+	case USB_ENDPOINT_XFER_INT:
+		/* mult = 0.  Execute N Transactions as demonstrated by
+		 * the USB variable length packet protocol where N is
+		 * computed using the Maximum Packet Length (dQH) and
+		 * the Total Bytes field (dTD) */
+		mult = 0;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		/* Calculate transactions needed for high bandwidth iso */
+		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
+		max = max & 0x7ff;	/* bit 0~10 */
+		/* 3 transactions at most */
+		if (mult > 3)
+			goto en_done;
+		break;
+	default:
+		goto en_done;
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+	ep->ep.maxpacket = max;
+	ep->desc = desc;
+	ep->stopped = 0;
+
+	/* Controller related setup */
+	/* Init EPx Queue Head (Ep Capabilities field in QH
+	 * according to max, zlt, mult) */
+	struct_ep_qh_setup(udc, (unsigned char) ep_index(ep),
+			(unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
+					?  USB_SEND : USB_RECV),
+			(unsigned char) (desc->bmAttributes
+					& USB_ENDPOINT_XFERTYPE_MASK),
+			max, zlt, mult);
+
+	/* Init endpoint ctrl register */
+	dr_ep_setup((unsigned char) ep_index(ep),
+			(unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
+					? USB_SEND : USB_RECV),
+			(unsigned char) (desc->bmAttributes
+					& USB_ENDPOINT_XFERTYPE_MASK));
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	retval = 0;
+
+	VDBG("enabled %s (ep%d%s) maxpacket %d", ep->ep.name,
+			ep->desc->bEndpointAddress & 0x0f,
+			(desc->bEndpointAddress & USB_DIR_IN)
+				? "in" : "out", max);
+en_done:
+	return retval;
+}
+
+/*---------------------------------------------------------------------
+ * @ep : the ep being unconfigured. May not be ep0
+ * Any pending and incomplete reqs will complete with status -ESHUTDOWN
+*---------------------------------------------------------------------*/
+static int fsl_ep_disable(struct usb_ep *_ep)
+{
+	struct fsl_udc *udc = NULL;
+	struct fsl_ep *ep = NULL;
+	unsigned long flags = 0;
+	u32 epctrl;
+	int ep_num;
+
+	ep = container_of(_ep, struct fsl_ep, ep);
+	if (!_ep || !ep->desc) {
+		VDBG("%s not enabled", _ep ? ep->ep.name : NULL);
+		return -EINVAL;
+	}
+
+	/* disable ep on controller */
+	ep_num = ep_index(ep);
+	epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+	if (ep_is_in(ep)) {
+		epctrl &= ~(EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE);
+		epctrl |= EPCTRL_EP_TYPE_BULK << EPCTRL_TX_EP_TYPE_SHIFT;
+	} else {
+		epctrl &= ~(EPCTRL_RX_ENABLE | EPCTRL_TX_TYPE);
+		epctrl |= EPCTRL_EP_TYPE_BULK << EPCTRL_RX_EP_TYPE_SHIFT;
+	}
+	fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
+
+	udc = (struct fsl_udc *)ep->udc;
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* nuke all pending requests (does flush) */
+	nuke(ep, -ESHUTDOWN);
+
+	ep->desc = NULL;
+	ep->ep.desc = NULL;
+	ep->stopped = 1;
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	VDBG("disabled %s OK", _ep->name);
+	return 0;
+}
+
+/*---------------------------------------------------------------------
+ * allocate a request object used by this endpoint
+ * the request is linked into ep->queue later, when it is queued
+ * Returns the request, or NULL if one could not be allocated
+*---------------------------------------------------------------------*/
+static struct usb_request *
+fsl_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	struct fsl_req *req = NULL;
+
+	req = kzalloc(sizeof *req, gfp_flags);
+	if (!req)
+		return NULL;
+
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD(&req->queue);
+
+	return &req->req;
+}
+
+static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct fsl_req *req = NULL;
+
+	req = container_of(_req, struct fsl_req, req);
+
+	if (_req)
+		kfree(req);
+}
+
+/* Actually add a dTD chain to an empty dQH and let go */
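+/*
+ * The ENDPTPRIME register has one bit per endpoint and direction: OUT
+ * endpoints use bits 0..15 and IN endpoints bits 16..31, which is the
+ * bitmask computed below.  Writing the bit requests that the controller
+ * fetch the dQH's next-dTD pointer and start the transfer.
+ */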
+static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
+{
+	struct ep_queue_head *qh = get_qh_by_ep(ep);
+
+	/* Write dQH next pointer and terminate bit to 0 */
+	qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
+			& EP_QUEUE_HEAD_NEXT_POINTER_MASK);
+
+	/* Clear active and halt bit */
+	qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
+					| EP_QUEUE_HEAD_STATUS_HALT));
+
+	/* Ensure that updates to the QH will occur before priming. */
+	wmb();
+
+	/* Prime endpoint by writing correct bit to ENDPTPRIME */
+	fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
+			: (1 << (ep_index(ep))), &dr_regs->endpointprime);
+}
+
+/* Add dTD chain to the dQH of an EP */
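+/*
+ * When appending to a non-empty queue there is a race against hardware that
+ * may be retiring the previous dTD at the same moment.  The loop below is
+ * the usual "Add dTD TripWire" (ATDTW) procedure: set ATDTW, sample
+ * ENDPTSTATUS, and repeat until ATDTW is still set afterwards so the sampled
+ * status is known to be consistent; the endpoint is re-primed only if it has
+ * gone idle.
+ */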
+static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
+{
+	u32 temp, bitmask, tmp_stat;
+
+	/* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
+	VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
+
+	bitmask = ep_is_in(ep)
+		? (1 << (ep_index(ep) + 16))
+		: (1 << (ep_index(ep)));
+
+	/* check if the pipe is empty */
+	if (!(list_empty(&ep->queue)) && !(ep_index(ep) == 0)) {
+		/* Add td to the end */
+		struct fsl_req *lastreq;
+		lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
+		lastreq->tail->next_td_ptr =
+			cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
+		/* Ensure dTD's next dtd pointer to be updated */
+		wmb();
+		/* Read prime bit, if 1 goto done */
+		if (fsl_readl(&dr_regs->endpointprime) & bitmask)
+			return;
+
+		do {
+			/* Set ATDTW bit in USBCMD */
+			temp = fsl_readl(&dr_regs->usbcmd);
+			fsl_writel(temp | USB_CMD_ATDTW, &dr_regs->usbcmd);
+
+			/* Read correct status bit */
+			tmp_stat = fsl_readl(&dr_regs->endptstatus) & bitmask;
+
+		} while (!(fsl_readl(&dr_regs->usbcmd) & USB_CMD_ATDTW));
+
+		/* Write ATDTW bit to 0 */
+		temp = fsl_readl(&dr_regs->usbcmd);
+		fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
+
+		if (tmp_stat)
+			return;
+	}
+
+	fsl_prime_ep(ep, req->head);
+}
+
+/* Fill in the dTD structure
+ * @req: request that the transfer belongs to
+ * @length: returns the actual data length of the dTD
+ * @dma: return dma address of the dTD
+ * @is_last: return flag if it is the last dTD of the request
+ * return: pointer to the built dTD */
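+/*
+ * Each dTD carries five 4 KiB page pointers, so one dTD describes at most
+ * EP_MAX_LENGTH_TRANSFER bytes of a request; longer requests are split into
+ * a chain of dTDs by fsl_req_to_dtd() below.  Illustrative example, assuming
+ * a 16 KiB per-dTD limit: a 40 KiB bulk request becomes three dTDs of
+ * 16 KiB, 16 KiB and 8 KiB.
+ */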
+static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
+		dma_addr_t *dma, int *is_last, gfp_t gfp_flags)
+{
+	u32 swap_temp;
+	struct ep_td_struct *dtd;
+
+	/* how big will this transfer be? */
+	*length = min(req->req.length - req->req.actual,
+			(unsigned)EP_MAX_LENGTH_TRANSFER);
+
+	dtd = dma_pool_alloc(udc_controller->td_pool, gfp_flags, dma);
+	if (dtd == NULL)
+		return dtd;
+
+	dtd->td_dma = *dma;
+	/* Clear reserved field */
+	swap_temp = hc32_to_cpu(dtd->size_ioc_sts);
+	swap_temp &= ~DTD_RESERVED_FIELDS;
+	dtd->size_ioc_sts = cpu_to_hc32(swap_temp);
+
+	/* Init all of buffer page pointers */
+	swap_temp = (u32) (req->req.dma + req->req.actual);
+	dtd->buff_ptr0 = cpu_to_hc32(swap_temp);
+	dtd->buff_ptr1 = cpu_to_hc32(swap_temp + 0x1000);
+	dtd->buff_ptr2 = cpu_to_hc32(swap_temp + 0x2000);
+	dtd->buff_ptr3 = cpu_to_hc32(swap_temp + 0x3000);
+	dtd->buff_ptr4 = cpu_to_hc32(swap_temp + 0x4000);
+
+	req->req.actual += *length;
+
+	/* zlp is needed if req->req.zero is set */
+	if (req->req.zero) {
+		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+			*is_last = 1;
+		else
+			*is_last = 0;
+	} else if (req->req.length == req->req.actual)
+		*is_last = 1;
+	else
+		*is_last = 0;
+
+	if ((*is_last) == 0)
+		VDBG("multi-dtd request!");
+	/* Fill in the transfer size; set active bit */
+	swap_temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
+
+	/* Enable interrupt for the last dtd of a request */
+	if (*is_last && !req->req.no_interrupt)
+		swap_temp |= DTD_IOC;
+
+	dtd->size_ioc_sts = cpu_to_hc32(swap_temp);
+
+	mb();
+
+	VDBG("length = %d address= 0x%x", *length, (int)*dma);
+
+	return dtd;
+}
+
+/* Generate dtd chain for a request */
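+/*
+ * Resulting chain (sketch):
+ *
+ *	req->head -> dTD -> ... -> req->tail (next_td_ptr = DTD_NEXT_TERMINATE)
+ *
+ * req->dtd_count is incremented once per dTD, and only the last dTD carries
+ * the IOC bit (unless req->req.no_interrupt suppresses it).
+ */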
+static int fsl_req_to_dtd(struct fsl_req *req, gfp_t gfp_flags)
+{
+	unsigned	count;
+	int		is_last;
+	int		is_first = 1;
+	struct ep_td_struct	*last_dtd = NULL, *dtd;
+	dma_addr_t dma;
+
+	do {
+		dtd = fsl_build_dtd(req, &count, &dma, &is_last, gfp_flags);
+		if (dtd == NULL)
+			return -ENOMEM;
+
+		if (is_first) {
+			is_first = 0;
+			req->head = dtd;
+		} else {
+			last_dtd->next_td_ptr = cpu_to_hc32(dma);
+			last_dtd->next_td_virt = dtd;
+		}
+		last_dtd = dtd;
+
+		req->dtd_count++;
+	} while (!is_last);
+
+	dtd->next_td_ptr = cpu_to_hc32(DTD_NEXT_TERMINATE);
+
+	req->tail = dtd;
+
+	return 0;
+}
+
+/* queues (submits) an I/O request to an endpoint */
+static int
+fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+	struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
+	struct fsl_req *req = container_of(_req, struct fsl_req, req);
+	struct fsl_udc *udc;
+	unsigned long flags;
+
+	/* catch various bogus parameters */
+	if (!_req || !req->req.complete || !req->req.buf
+			|| !list_empty(&req->queue)) {
+		VDBG("%s, bad params", __func__);
+		return -EINVAL;
+	}
+	if (unlikely(!_ep || !ep->desc)) {
+		VDBG("%s, bad ep", __func__);
+		return -EINVAL;
+	}
+	if (usb_endpoint_xfer_isoc(ep->desc)) {
+		if (req->req.length > ep->ep.maxpacket)
+			return -EMSGSIZE;
+	}
+
+	udc = ep->udc;
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	req->ep = ep;
+
+	/* map virtual address to hardware */
+	if (req->req.dma == DMA_ADDR_INVALID) {
+		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+					req->req.buf,
+					req->req.length, ep_is_in(ep)
+						? DMA_TO_DEVICE
+						: DMA_FROM_DEVICE);
+		req->mapped = 1;
+	} else {
+		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
+					req->req.dma, req->req.length,
+					ep_is_in(ep)
+						? DMA_TO_DEVICE
+						: DMA_FROM_DEVICE);
+		req->mapped = 0;
+	}
+
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->dtd_count = 0;
+
+	/* build dtds and push them to device queue */
+	if (!fsl_req_to_dtd(req, gfp_flags)) {
+		spin_lock_irqsave(&udc->lock, flags);
+		fsl_queue_td(ep, req);
+	} else {
+		return -ENOMEM;
+	}
+
+	/* irq handler advances the queue */
+	if (req != NULL)
+		list_add_tail(&req->queue, &ep->queue);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
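+
+/*
+ * Illustrative use from a function driver (a sketch, not code in this file;
+ * my_complete is a hypothetical callback):
+ *
+ *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ *	req->buf = buf;
+ *	req->length = len;
+ *	req->complete = my_complete;
+ *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ *
+ * usb_ep_queue() lands in fsl_ep_queue() above, and my_complete() is later
+ * invoked from done() once the dTD chain finishes.
+ */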
+
+/* dequeues (cancels, unlinks) an I/O request from an endpoint */
+static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
+	struct fsl_req *req;
+	unsigned long flags;
+	int ep_num, stopped, ret = 0;
+	u32 epctrl;
+
+	if (!_ep || !_req)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+	stopped = ep->stopped;
+
+	/* Stop the ep before we deal with the queue */
+	ep->stopped = 1;
+	ep_num = ep_index(ep);
+	epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+	if (ep_is_in(ep))
+		epctrl &= ~EPCTRL_TX_ENABLE;
+	else
+		epctrl &= ~EPCTRL_RX_ENABLE;
+	fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* The request is in progress, or completed but not dequeued */
+	if (ep->queue.next == &req->queue) {
+		_req->status = -ECONNRESET;
+		fsl_ep_fifo_flush(_ep);	/* flush current transfer */
+
+		/* The request isn't the last request in this ep queue */
+		if (req->queue.next != &ep->queue) {
+			struct fsl_req *next_req;
+
+			next_req = list_entry(req->queue.next, struct fsl_req,
+					queue);
+
+			/* prime with dTD of next request */
+			fsl_prime_ep(ep, next_req->head);
+		}
+	/* The request hasn't been processed, patch up the TD chain */
+	} else {
+		struct fsl_req *prev_req;
+
+		prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
+		prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
+	}
+
+	done(ep, req, -ECONNRESET);
+
+	/* Enable EP */
+out:	epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+	if (ep_is_in(ep))
+		epctrl |= EPCTRL_TX_ENABLE;
+	else
+		epctrl |= EPCTRL_RX_ENABLE;
+	fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
+	ep->stopped = stopped;
+
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+	return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*-----------------------------------------------------------------
+ * modify the endpoint halt feature
+ * @ep: the non-isochronous endpoint being stalled
+ * @value: 1--set halt  0--clear halt
+ * Returns zero, or a negative error code.
+*----------------------------------------------------------------*/
+static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
+{
+	struct fsl_ep *ep = NULL;
+	unsigned long flags = 0;
+	int status = -EOPNOTSUPP;	/* operation not supported */
+	unsigned char ep_dir = 0, ep_num = 0;
+	struct fsl_udc *udc = NULL;
+
+	ep = container_of(_ep, struct fsl_ep, ep);
+	udc = ep->udc;
+	if (!_ep || !ep->desc) {
+		status = -EINVAL;
+		goto out;
+	}
+
+	if (usb_endpoint_xfer_isoc(ep->desc)) {
+		status = -EOPNOTSUPP;
+		goto out;
+	}
+
+	/* An attempt to halt an IN ep will fail if any transfer requests
+	 * are still queued */
+	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
+		status = -EAGAIN;
+		goto out;
+	}
+
+	status = 0;
+	ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV;
+	ep_num = (unsigned char)(ep_index(ep));
+	spin_lock_irqsave(&ep->udc->lock, flags);
+	dr_ep_change_stall(ep_num, ep_dir, value);
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+	if (ep_index(ep) == 0) {
+		udc->ep0_state = WAIT_FOR_SETUP;
+		udc->ep0_dir = 0;
+	}
+out:
+	VDBG(" %s %s halt stat %d", ep->ep.name,
+			value ?  "set" : "clear", status);
+
+	return status;
+}
+
+static int fsl_ep_fifo_status(struct usb_ep *_ep)
+{
+	struct fsl_ep *ep;
+	struct fsl_udc *udc;
+	int size = 0;
+	u32 bitmask;
+	struct ep_queue_head *qh;
+
+	ep = container_of(_ep, struct fsl_ep, ep);
+	if (!_ep || (!ep->desc && ep_index(ep) != 0))
+		return -ENODEV;
+
+	udc = (struct fsl_udc *)ep->udc;
+
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	qh = get_qh_by_ep(ep);
+
+	bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
+	    (1 << (ep_index(ep)));
+
+	if (fsl_readl(&dr_regs->endptstatus) & bitmask)
+		size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
+		    >> DTD_LENGTH_BIT_POS;
+
+	pr_debug("%s %u\n", __func__, size);
+	return size;
+}
+
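+/*
+ * ENDPTFLUSH uses the same per-endpoint bit layout as ENDPTPRIME (OUT bits
+ * in the low half, IN bits in the high half).  The controller clears a flush
+ * bit once the flush finishes; ENDPTSTATUS is re-checked afterwards because
+ * the endpoint may have been re-primed while the flush was in progress.
+ */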
+static void fsl_ep_fifo_flush(struct usb_ep *_ep)
+{
+	struct fsl_ep *ep;
+	int ep_num, ep_dir;
+	u32 bits;
+	unsigned long timeout;
+#define FSL_UDC_FLUSH_TIMEOUT 1000
+
+	if (!_ep) {
+		return;
+	} else {
+		ep = container_of(_ep, struct fsl_ep, ep);
+		if (!ep->desc)
+			return;
+	}
+	ep_num = ep_index(ep);
+	ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV;
+
+	if (ep_num == 0)
+		bits = (1 << 16) | 1;
+	else if (ep_dir == USB_SEND)
+		bits = 1 << (16 + ep_num);
+	else
+		bits = 1 << ep_num;
+
+	timeout = jiffies + FSL_UDC_FLUSH_TIMEOUT;
+	do {
+		fsl_writel(bits, &dr_regs->endptflush);
+
+		/* Wait until flush complete */
+		while (fsl_readl(&dr_regs->endptflush)) {
+			if (time_after(jiffies, timeout)) {
+				ERR("ep flush timeout\n");
+				return;
+			}
+			cpu_relax();
+		}
+		/* See if we need to flush again */
+	} while (fsl_readl(&dr_regs->endptstatus) & bits);
+}
+
+static struct usb_ep_ops fsl_ep_ops = {
+	.enable = fsl_ep_enable,
+	.disable = fsl_ep_disable,
+
+	.alloc_request = fsl_alloc_request,
+	.free_request = fsl_free_request,
+
+	.queue = fsl_ep_queue,
+	.dequeue = fsl_ep_dequeue,
+
+	.set_halt = fsl_ep_set_halt,
+	.fifo_status = fsl_ep_fifo_status,
+	.fifo_flush = fsl_ep_fifo_flush,	/* flush fifo */
+};
+
+/*-------------------------------------------------------------------------
+		Gadget Driver Layer Operations
+-------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------
+ * Get the current frame number (from DR frame_index Reg )
+ *----------------------------------------------------------------------*/
+static int fsl_get_frame(struct usb_gadget *gadget)
+{
+	return (int)(fsl_readl(&dr_regs->frindex) & USB_FRINDEX_MASKS);
+}
+
+/*-----------------------------------------------------------------------
+ * Tries to wake up the host connected to this gadget
+ -----------------------------------------------------------------------*/
+static int fsl_wakeup(struct usb_gadget *gadget)
+{
+	struct fsl_udc *udc = container_of(gadget, struct fsl_udc, gadget);
+	u32 portsc;
+
+	/* Remote wakeup feature not enabled by host */
+	if (!udc->remote_wakeup)
+		return -ENOTSUPP;
+
+	portsc = fsl_readl(&dr_regs->portsc1);
+	/* not suspended? */
+	if (!(portsc & PORTSCX_PORT_SUSPEND))
+		return 0;
+	/* trigger force resume */
+	portsc |= PORTSCX_PORT_FORCE_RESUME;
+	fsl_writel(portsc, &dr_regs->portsc1);
+	return 0;
+}
+
+static int can_pullup(struct fsl_udc *udc)
+{
+	return udc->driver && udc->softconnect && udc->vbus_active;
+}
+
+/* Notify the controller that VBUS is powered; called by whatever
+   detects VBUS sessions */
+static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	struct fsl_udc	*udc;
+	unsigned long	flags;
+
+	udc = container_of(gadget, struct fsl_udc, gadget);
+	spin_lock_irqsave(&udc->lock, flags);
+	VDBG("VBUS %s", is_active ? "on" : "off");
+	udc->vbus_active = (is_active != 0);
+	if (can_pullup(udc))
+		fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
+				&dr_regs->usbcmd);
+	else
+		fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
+				&dr_regs->usbcmd);
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+/* constrain controller's VBUS power usage
+ * This call is used by gadget drivers during SET_CONFIGURATION calls,
+ * reporting how much power the device may consume.  For example, this
+ * could affect how quickly batteries are recharged.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static int fsl_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+	struct fsl_udc *udc;
+
+	udc = container_of(gadget, struct fsl_udc, gadget);
+	if (udc->transceiver)
+		return usb_phy_set_power(udc->transceiver, mA);
+	return -ENOTSUPP;
+}
+
+/* Change Data+ pullup status
+ * this func is used by usb_gadget_connect/disconnect
+ */
+static int fsl_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct fsl_udc *udc;
+
+	udc = container_of(gadget, struct fsl_udc, gadget);
+	udc->softconnect = (is_on != 0);
+	if (can_pullup(udc))
+		fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
+				&dr_regs->usbcmd);
+	else
+		fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
+				&dr_regs->usbcmd);
+
+	return 0;
+}
+
+static int fsl_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *));
+static int fsl_stop(struct usb_gadget_driver *driver);
+/* defined in gadget.h */
+static struct usb_gadget_ops fsl_gadget_ops = {
+	.get_frame = fsl_get_frame,
+	.wakeup = fsl_wakeup,
+/*	.set_selfpowered = fsl_set_selfpowered,	*/ /* Always selfpowered */
+	.vbus_session = fsl_vbus_session,
+	.vbus_draw = fsl_vbus_draw,
+	.pullup = fsl_pullup,
+	.start = fsl_start,
+	.stop = fsl_stop,
+};
+
+/* Set protocol stall on ep0, protocol stall will automatically be cleared
+   on new transaction */
+static void ep0stall(struct fsl_udc *udc)
+{
+	u32 tmp;
+
+	/* must set tx and rx to stall at the same time */
+	tmp = fsl_readl(&dr_regs->endptctrl[0]);
+	tmp |= EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL;
+	fsl_writel(tmp, &dr_regs->endptctrl[0]);
+	udc->ep0_state = WAIT_FOR_SETUP;
+	udc->ep0_dir = 0;
+}
+
+/* Prime a status phase for ep0 */
+static int ep0_prime_status(struct fsl_udc *udc, int direction)
+{
+	struct fsl_req *req = udc->status_req;
+	struct fsl_ep *ep;
+
+	if (direction == EP_DIR_IN)
+		udc->ep0_dir = USB_DIR_IN;
+	else
+		udc->ep0_dir = USB_DIR_OUT;
+
+	ep = &udc->eps[0];
+	if (udc->ep0_state != DATA_STATE_XMIT)
+		udc->ep0_state = WAIT_FOR_OUT_STATUS;
+
+	req->ep = ep;
+	req->req.length = 0;
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->req.complete = NULL;
+	req->dtd_count = 0;
+
+	req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+			req->req.buf, req->req.length,
+			ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	req->mapped = 1;
+
+	if (fsl_req_to_dtd(req, GFP_ATOMIC) == 0)
+		fsl_queue_td(ep, req);
+	else
+		return -ENOMEM;
+
+	list_add_tail(&req->queue, &ep->queue);
+
+	return 0;
+}
+
+static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)
+{
+	struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);
+
+	if (ep->name)
+		nuke(ep, -ESHUTDOWN);
+}
+
+/*
+ * ch9 Set address
+ */
+static void ch9setaddress(struct fsl_udc *udc, u16 value, u16 index, u16 length)
+{
+	/* Save the new address to device struct */
+	udc->device_address = (u8) value;
+	/* Update usb state */
+	udc->usb_state = USB_STATE_ADDRESS;
+	/* Status phase */
+	if (ep0_prime_status(udc, EP_DIR_IN))
+		ep0stall(udc);
+}
+
+/*
+ * ch9 Get status
+ */
+static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
+		u16 index, u16 length)
+{
+	u16 tmp = 0;		/* Status, cpu endian */
+	struct fsl_req *req;
+	struct fsl_ep *ep;
+
+	ep = &udc->eps[0];
+
+	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+		/* Get device status */
+		tmp = 1 << USB_DEVICE_SELF_POWERED;
+		tmp |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
+		/* Get interface status */
+		/* We don't have interface information in udc driver */
+		tmp = 0;
+	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
+		/* Get endpoint status */
+		struct fsl_ep *target_ep;
+
+		target_ep = get_ep_by_pipe(udc, get_pipe_by_windex(index));
+
+		/* stall if endpoint doesn't exist */
+		if (!target_ep->desc)
+			goto stall;
+		tmp = dr_ep_get_stall(ep_index(target_ep), ep_is_in(target_ep))
+				<< USB_ENDPOINT_HALT;
+	}
+
+	udc->ep0_dir = USB_DIR_IN;
+	/* Borrow the per device status_req */
+	req = udc->status_req;
+	/* Fill in the request structure */
+	*((u16 *) req->req.buf) = cpu_to_le16(tmp);
+
+	req->ep = ep;
+	req->req.length = 2;
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->req.complete = NULL;
+	req->dtd_count = 0;
+
+	req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+				req->req.buf, req->req.length,
+				ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	req->mapped = 1;
+
+	/* prime the data phase */
+	if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0))
+		fsl_queue_td(ep, req);
+	else			/* no mem */
+		goto stall;
+
+	list_add_tail(&req->queue, &ep->queue);
+	udc->ep0_state = DATA_STATE_XMIT;
+	if (ep0_prime_status(udc, EP_DIR_OUT))
+		ep0stall(udc);
+
+	return;
+stall:
+	ep0stall(udc);
+}
+
+static void setup_received_irq(struct fsl_udc *udc,
+		struct usb_ctrlrequest *setup)
+{
+	u16 wValue = le16_to_cpu(setup->wValue);
+	u16 wIndex = le16_to_cpu(setup->wIndex);
+	u16 wLength = le16_to_cpu(setup->wLength);
+
+	udc_reset_ep_queue(udc, 0);
+
+	/* We process some standard setup requests here */
+	switch (setup->bRequest) {
+	case USB_REQ_GET_STATUS:
+		/* Data+Status phase from udc */
+		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+					!= (USB_DIR_IN | USB_TYPE_STANDARD))
+			break;
+		ch9getstatus(udc, setup->bRequestType, wValue, wIndex, wLength);
+		return;
+
+	case USB_REQ_SET_ADDRESS:
+		/* Status phase from udc */
+		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
+						| USB_RECIP_DEVICE))
+			break;
+		ch9setaddress(udc, wValue, wIndex, wLength);
+		return;
+
+	case USB_REQ_CLEAR_FEATURE:
+	case USB_REQ_SET_FEATURE:
+		/* Status phase from udc */
+	{
+		int rc = -EOPNOTSUPP;
+		u16 ptc = 0;
+
+		if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
+				== (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
+			int pipe = get_pipe_by_windex(wIndex);
+			struct fsl_ep *ep;
+
+			if (wValue != 0 || wLength != 0 || pipe >= udc->max_ep)
+				break;
+			ep = get_ep_by_pipe(udc, pipe);
+
+			spin_unlock(&udc->lock);
+			rc = fsl_ep_set_halt(&ep->ep,
+					(setup->bRequest == USB_REQ_SET_FEATURE)
+						? 1 : 0);
+			spin_lock(&udc->lock);
+
+		} else if ((setup->bRequestType & (USB_RECIP_MASK
+				| USB_TYPE_MASK)) == (USB_RECIP_DEVICE
+				| USB_TYPE_STANDARD)) {
+			/* Note: The driver does not include OTG support yet.
+			 * This will be set when OTG support is added */
+			if (wValue == USB_DEVICE_TEST_MODE)
+				ptc = wIndex >> 8;
+			else if (gadget_is_otg(&udc->gadget)) {
+				if (setup->bRequest ==
+				    USB_DEVICE_B_HNP_ENABLE)
+					udc->gadget.b_hnp_enable = 1;
+				else if (setup->bRequest ==
+					 USB_DEVICE_A_HNP_SUPPORT)
+					udc->gadget.a_hnp_support = 1;
+				else if (setup->bRequest ==
+					 USB_DEVICE_A_ALT_HNP_SUPPORT)
+					udc->gadget.a_alt_hnp_support = 1;
+			}
+			rc = 0;
+		} else
+			break;
+
+		if (rc == 0) {
+			if (ep0_prime_status(udc, EP_DIR_IN))
+				ep0stall(udc);
+		}
+		if (ptc) {
+			u32 tmp;
+
+			mdelay(10);
+			tmp = fsl_readl(&dr_regs->portsc1) | (ptc << 16);
+			fsl_writel(tmp, &dr_regs->portsc1);
+			printk(KERN_INFO "udc: switch to test mode %d.\n", ptc);
+		}
+
+		return;
+	}
+
+	default:
+		break;
+	}
+
+	/* Requests handled by gadget */
+	if (wLength) {
+		/* Data phase from gadget, status phase from udc */
+		udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+				?  USB_DIR_IN : USB_DIR_OUT;
+		spin_unlock(&udc->lock);
+		if (udc->driver->setup(&udc->gadget,
+				&udc->local_setup_buff) < 0)
+			ep0stall(udc);
+		spin_lock(&udc->lock);
+		udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
+				?  DATA_STATE_XMIT : DATA_STATE_RECV;
+		/*
+		 * If the data stage is IN, send status prime immediately.
+		 * See 2.0 Spec chapter 8.5.3.3 for detail.
+		 */
+		if (udc->ep0_state == DATA_STATE_XMIT)
+			if (ep0_prime_status(udc, EP_DIR_OUT))
+				ep0stall(udc);
+
+	} else {
+		/* No data phase, IN status from gadget */
+		udc->ep0_dir = USB_DIR_IN;
+		spin_unlock(&udc->lock);
+		if (udc->driver->setup(&udc->gadget,
+				&udc->local_setup_buff) < 0)
+			ep0stall(udc);
+		spin_lock(&udc->lock);
+		udc->ep0_state = WAIT_FOR_OUT_STATUS;
+	}
+}
+
+/* Process request for Data or Status phase of ep0
+ * prime status phase if needed */
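+/*
+ * ep0 state machine (sketch), as driven by setup_received_irq() and this
+ * completion handler:
+ *
+ *	WAIT_FOR_SETUP -> DATA_STATE_XMIT or DATA_STATE_RECV (data stage)
+ *	               -> WAIT_FOR_OUT_STATUS (status stage)
+ *	               -> WAIT_FOR_SETUP
+ *
+ * Control requests without a data stage go straight to WAIT_FOR_OUT_STATUS.
+ */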
+static void ep0_req_complete(struct fsl_udc *udc, struct fsl_ep *ep0,
+		struct fsl_req *req)
+{
+	if (udc->usb_state == USB_STATE_ADDRESS) {
+		/* Set the new address */
+		u32 new_address = (u32) udc->device_address;
+		fsl_writel(new_address << USB_DEVICE_ADDRESS_BIT_POS,
+				&dr_regs->deviceaddr);
+	}
+
+	done(ep0, req, 0);
+
+	switch (udc->ep0_state) {
+	case DATA_STATE_XMIT:
+		/* already primed at setup_received_irq */
+		udc->ep0_state = WAIT_FOR_OUT_STATUS;
+		break;
+	case DATA_STATE_RECV:
+		/* send status phase */
+		if (ep0_prime_status(udc, EP_DIR_IN))
+			ep0stall(udc);
+		break;
+	case WAIT_FOR_OUT_STATUS:
+		udc->ep0_state = WAIT_FOR_SETUP;
+		break;
+	case WAIT_FOR_SETUP:
+		ERR("Unexpected ep0 packet\n");
+		break;
+	default:
+		ep0stall(udc);
+		break;
+	}
+}
+
+/* Tripwire mechanism to ensure a setup packet payload is extracted without
+ * being corrupted by another incoming setup packet */
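+/*
+ * The Setup TripWire (SUTW) procedure sketched below: set SUTW, copy the
+ * 8-byte setup payload out of the dQH, and redo the copy if hardware cleared
+ * SUTW in the meantime (meaning another setup packet arrived mid-copy).
+ * SUTW is cleared explicitly once a consistent copy has been made.
+ */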
+static void tripwire_handler(struct fsl_udc *udc, u8 ep_num, u8 *buffer_ptr)
+{
+	u32 temp;
+	struct ep_queue_head *qh;
+	struct fsl_usb2_platform_data *pdata = udc->pdata;
+
+	qh = &udc->ep_qh[ep_num * 2 + EP_DIR_OUT];
+
+	/* Clear bit in ENDPTSETUPSTAT */
+	temp = fsl_readl(&dr_regs->endptsetupstat);
+	fsl_writel(temp | (1 << ep_num), &dr_regs->endptsetupstat);
+
+	/* repeat while a hazard exists, i.e. another setup packet may arrive */
+	do {
+		/* Set Setup Tripwire */
+		temp = fsl_readl(&dr_regs->usbcmd);
+		fsl_writel(temp | USB_CMD_SUTW, &dr_regs->usbcmd);
+
+		/* Copy the setup packet to local buffer */
+		if (pdata->le_setup_buf) {
+			u32 *p = (u32 *)buffer_ptr;
+			u32 *s = (u32 *)qh->setup_buffer;
+
+			/* Convert little endian setup buffer to CPU endian */
+			*p++ = le32_to_cpu(*s++);
+			*p = le32_to_cpu(*s);
+		} else {
+			memcpy(buffer_ptr, (u8 *) qh->setup_buffer, 8);
+		}
+	} while (!(fsl_readl(&dr_regs->usbcmd) & USB_CMD_SUTW));
+
+	/* Clear Setup Tripwire */
+	temp = fsl_readl(&dr_regs->usbcmd);
+	fsl_writel(temp & ~USB_CMD_SUTW, &dr_regs->usbcmd);
+}
+
+/* process_ep_req(): handle the completed dTDs of this req */
+static int process_ep_req(struct fsl_udc *udc, int pipe,
+		struct fsl_req *curr_req)
+{
+	struct ep_td_struct *curr_td;
+	int	td_complete, actual, remaining_length, j, tmp;
+	int	status = 0;
+	int	errors = 0;
+	struct  ep_queue_head *curr_qh = &udc->ep_qh[pipe];
+	int direction = pipe % 2;
+
+	curr_td = curr_req->head;
+	td_complete = 0;
+	actual = curr_req->req.length;
+
+	for (j = 0; j < curr_req->dtd_count; j++) {
+		remaining_length = (hc32_to_cpu(curr_td->size_ioc_sts)
+					& DTD_PACKET_SIZE)
+				>> DTD_LENGTH_BIT_POS;
+		actual -= remaining_length;
+
+		errors = hc32_to_cpu(curr_td->size_ioc_sts);
+		if (errors & DTD_ERROR_MASK) {
+			if (errors & DTD_STATUS_HALTED) {
+				ERR("dTD error %08x QH=%d\n", errors, pipe);
+				/* Clear the errors and Halt condition */
+				tmp = hc32_to_cpu(curr_qh->size_ioc_int_sts);
+				tmp &= ~errors;
+				curr_qh->size_ioc_int_sts = cpu_to_hc32(tmp);
+				status = -EPIPE;
+				/* FIXME: continue with next queued TD? */
+
+				break;
+			}
+			if (errors & DTD_STATUS_DATA_BUFF_ERR) {
+				VDBG("Transfer overflow");
+				status = -EPROTO;
+				break;
+			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
+				VDBG("ISO error");
+				status = -EILSEQ;
+				break;
+			} else
+				ERR("Unknown error has occurred (0x%x)!\n",
+					errors);
+
+		} else if (hc32_to_cpu(curr_td->size_ioc_sts)
+				& DTD_STATUS_ACTIVE) {
+			VDBG("Request not complete");
+			status = REQ_UNCOMPLETE;
+			return status;
+		} else if (remaining_length) {
+			if (direction) {
+				VDBG("Transmit dTD remaining length not zero");
+				status = -EPROTO;
+				break;
+			} else {
+				td_complete++;
+				break;
+			}
+		} else {
+			td_complete++;
+			VDBG("dTD transmitted successfully");
+		}
+
+		if (j != curr_req->dtd_count - 1)
+			curr_td = (struct ep_td_struct *)curr_td->next_td_virt;
+	}
+
+	if (status)
+		return status;
+
+	curr_req->req.actual = actual;
+
+	return 0;
+}
+
+/* Process a DTD completion interrupt */
+static void dtd_complete_irq(struct fsl_udc *udc)
+{
+	u32 bit_pos;
+	int i, ep_num, direction, bit_mask, status;
+	struct fsl_ep *curr_ep;
+	struct fsl_req *curr_req, *temp_req;
+
+	/* Clear the bits in the register */
+	bit_pos = fsl_readl(&dr_regs->endptcomplete);
+	fsl_writel(bit_pos, &dr_regs->endptcomplete);
+
+	if (!bit_pos)
+		return;
+
+	for (i = 0; i < udc->max_ep; i++) {
+		ep_num = i >> 1;
+		direction = i % 2;
+
+		bit_mask = 1 << (ep_num + 16 * direction);
+
+		if (!(bit_pos & bit_mask))
+			continue;
+
+		curr_ep = get_ep_by_pipe(udc, i);
+
+		/* Skip this ep if it has not been configured */
+		if (curr_ep->name == NULL) {
+			WARNING("Invalid EP?");
+			continue;
+		}
+
+		/* process the req queue until an incomplete request is found */
+		list_for_each_entry_safe(curr_req, temp_req, &curr_ep->queue,
+				queue) {
+			status = process_ep_req(udc, i, curr_req);
+
+			VDBG("status of process_ep_req= %d, ep = %d",
+					status, ep_num);
+			if (status == REQ_UNCOMPLETE)
+				break;
+			/* write back status to req */
+			curr_req->req.status = status;
+
+			if (ep_num == 0) {
+				ep0_req_complete(udc, curr_ep, curr_req);
+				break;
+			} else
+				done(curr_ep, curr_req, status);
+		}
+	}
+}
+
+static inline enum usb_device_speed portscx_device_speed(u32 reg)
+{
+	switch (reg & PORTSCX_PORT_SPEED_MASK) {
+	case PORTSCX_PORT_SPEED_HIGH:
+		return USB_SPEED_HIGH;
+	case PORTSCX_PORT_SPEED_FULL:
+		return USB_SPEED_FULL;
+	case PORTSCX_PORT_SPEED_LOW:
+		return USB_SPEED_LOW;
+	default:
+		return USB_SPEED_UNKNOWN;
+	}
+}
+
+/* Process a port change interrupt */
+static void port_change_irq(struct fsl_udc *udc)
+{
+	if (udc->bus_reset)
+		udc->bus_reset = 0;
+
+	/* Bus resetting is finished */
+	if (!(fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET))
+		/* Get the speed */
+		udc->gadget.speed =
+			portscx_device_speed(fsl_readl(&dr_regs->portsc1));
+
+	/* Update USB state */
+	if (!udc->resume_state)
+		udc->usb_state = USB_STATE_DEFAULT;
+}
+
+/* Process suspend interrupt */
+static void suspend_irq(struct fsl_udc *udc)
+{
+	udc->resume_state = udc->usb_state;
+	udc->usb_state = USB_STATE_SUSPENDED;
+
+	/* report suspend to the driver, serial.c does not support this */
+	if (udc->driver->suspend)
+		udc->driver->suspend(&udc->gadget);
+}
+
+static void bus_resume(struct fsl_udc *udc)
+{
+	udc->usb_state = udc->resume_state;
+	udc->resume_state = 0;
+
+	/* report resume to the driver, serial.c does not support this */
+	if (udc->driver->resume)
+		udc->driver->resume(&udc->gadget);
+}
+
+/* Clear up all ep queues */
+static int reset_queues(struct fsl_udc *udc)
+{
+	u8 pipe;
+
+	for (pipe = 0; pipe < udc->max_pipes; pipe++)
+		udc_reset_ep_queue(udc, pipe);
+
+	/* report disconnect; the driver is already quiesced */
+	spin_unlock(&udc->lock);
+	udc->driver->disconnect(&udc->gadget);
+	spin_lock(&udc->lock);
+
+	return 0;
+}
+
+/* Process reset interrupt */
+static void reset_irq(struct fsl_udc *udc)
+{
+	u32 temp;
+	unsigned long timeout;
+
+	/* Clear the device address */
+	temp = fsl_readl(&dr_regs->deviceaddr);
+	fsl_writel(temp & ~USB_DEVICE_ADDRESS_MASK, &dr_regs->deviceaddr);
+
+	udc->device_address = 0;
+
+	/* Clear usb state */
+	udc->resume_state = 0;
+	udc->ep0_dir = 0;
+	udc->ep0_state = WAIT_FOR_SETUP;
+	udc->remote_wakeup = 0;	/* default to 0 on reset */
+	udc->gadget.b_hnp_enable = 0;
+	udc->gadget.a_hnp_support = 0;
+	udc->gadget.a_alt_hnp_support = 0;
+
+	/* Clear all the setup token semaphores */
+	temp = fsl_readl(&dr_regs->endptsetupstat);
+	fsl_writel(temp, &dr_regs->endptsetupstat);
+
+	/* Clear all the endpoint complete status bits */
+	temp = fsl_readl(&dr_regs->endptcomplete);
+	fsl_writel(temp, &dr_regs->endptcomplete);
+
+	timeout = jiffies + 100;
+	while (fsl_readl(&dr_regs->endpointprime)) {
+		/* Wait until all endptprime bits cleared */
+		if (time_after(jiffies, timeout)) {
+			ERR("Timeout for reset\n");
+			break;
+		}
+		cpu_relax();
+	}
+
+	/* Write 1s to the flush register */
+	fsl_writel(0xffffffff, &dr_regs->endptflush);
+
+	if (fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET) {
+		VDBG("Bus reset");
+		/* Bus is resetting */
+		udc->bus_reset = 1;
+		/* Reset all the queues, including XD, dTD, EP queue
+		 * head and TR Queue */
+		reset_queues(udc);
+		udc->usb_state = USB_STATE_DEFAULT;
+	} else {
+		VDBG("Controller reset");
+		/* initialize usb hw regs except for the EP regs; do not
+		 * touch the usbintr reg */
+		dr_controller_setup(udc);
+
+		/* Reset all internal used Queues */
+		reset_queues(udc);
+
+		ep0_setup(udc);
+
+		/* Enable DR IRQ reg, Set Run bit, change udc state */
+		dr_controller_run(udc);
+		udc->usb_state = USB_STATE_ATTACHED;
+	}
+}
+
+/*
+ * USB device controller interrupt handler
+ */
+static irqreturn_t fsl_udc_irq(int irq, void *_udc)
+{
+	struct fsl_udc *udc = _udc;
+	u32 irq_src;
+	irqreturn_t status = IRQ_NONE;
+	unsigned long flags;
+
+	/* Disable ISR for OTG host mode */
+	if (udc->stopped)
+		return IRQ_NONE;
+	spin_lock_irqsave(&udc->lock, flags);
+	irq_src = fsl_readl(&dr_regs->usbsts) & fsl_readl(&dr_regs->usbintr);
+	/* Clear notification bits */
+	fsl_writel(irq_src, &dr_regs->usbsts);
+
+	/* VDBG("irq_src [0x%8x]", irq_src); */
+
+	/* Need to resume? */
+	if (udc->usb_state == USB_STATE_SUSPENDED)
+		if ((fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_SUSPEND) == 0)
+			bus_resume(udc);
+
+	/* USB Interrupt */
+	if (irq_src & USB_STS_INT) {
+		VDBG("Packet int");
+		/* Setup packet, we only support ep0 as the control ep */
+		if (fsl_readl(&dr_regs->endptsetupstat) & EP_SETUP_STATUS_EP0) {
+			tripwire_handler(udc, 0,
+					(u8 *) (&udc->local_setup_buff));
+			setup_received_irq(udc, &udc->local_setup_buff);
+			status = IRQ_HANDLED;
+		}
+
+		/* completion of dtd */
+		if (fsl_readl(&dr_regs->endptcomplete)) {
+			dtd_complete_irq(udc);
+			status = IRQ_HANDLED;
+		}
+	}
+
+	/* SOF (for ISO transfer) */
+	if (irq_src & USB_STS_SOF) {
+		status = IRQ_HANDLED;
+	}
+
+	/* Port Change */
+	if (irq_src & USB_STS_PORT_CHANGE) {
+		port_change_irq(udc);
+		status = IRQ_HANDLED;
+	}
+
+	/* Reset Received */
+	if (irq_src & USB_STS_RESET) {
+		VDBG("reset int");
+		reset_irq(udc);
+		status = IRQ_HANDLED;
+	}
+
+	/* Sleep Enable (Suspend) */
+	if (irq_src & USB_STS_SUSPEND) {
+		suspend_irq(udc);
+		status = IRQ_HANDLED;
+	}
+
+	if (irq_src & (USB_STS_ERR | USB_STS_SYS_ERR)) {
+		VDBG("Error IRQ %x", irq_src);
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return status;
+}
+
+/*----------------------------------------------------------------*
+ * Hook to gadget drivers
+ * Called by initialization code of gadget drivers
+*----------------------------------------------------------------*/
+static int fsl_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	int retval = -ENODEV;
+	unsigned long flags = 0;
+
+	if (!udc_controller)
+		return -ENODEV;
+
+	if (!driver || driver->max_speed < USB_SPEED_FULL
+			|| !bind || !driver->disconnect || !driver->setup)
+		return -EINVAL;
+
+	if (udc_controller->driver)
+		return -EBUSY;
+
+	/* a lock is needed; it is unclear whether this lock or another should be used */
+	spin_lock_irqsave(&udc_controller->lock, flags);
+
+	driver->driver.bus = NULL;
+	/* hook up the driver */
+	udc_controller->driver = driver;
+	udc_controller->gadget.dev.driver = &driver->driver;
+	spin_unlock_irqrestore(&udc_controller->lock, flags);
+
+	/* bind udc driver to gadget driver */
+	retval = bind(&udc_controller->gadget);
+	if (retval) {
+		VDBG("bind to %s --> %d", driver->driver.name, retval);
+		udc_controller->gadget.dev.driver = NULL;
+		udc_controller->driver = NULL;
+		goto out;
+	}
+
+	if (udc_controller->transceiver) {
+		/* Suspend the controller until the OTG layer enables it */
+		udc_controller->stopped = 1;
+		printk(KERN_INFO "Suspend udc for OTG auto detect\n");
+
+		/* connect to bus through transceiver */
+		if (udc_controller->transceiver) {
+			retval = otg_set_peripheral(
+					udc_controller->transceiver->otg,
+						    &udc_controller->gadget);
+			if (retval < 0) {
+				ERR("can't bind to transceiver\n");
+				driver->unbind(&udc_controller->gadget);
+				udc_controller->gadget.dev.driver = NULL;
+				udc_controller->driver = NULL;
+				return retval;
+			}
+		}
+	} else {
+		/* Enable DR IRQ reg and set USBCMD reg Run bit */
+		dr_controller_run(udc_controller);
+		udc_controller->usb_state = USB_STATE_ATTACHED;
+		udc_controller->ep0_state = WAIT_FOR_SETUP;
+		udc_controller->ep0_dir = 0;
+	}
+	printk(KERN_INFO "%s: bind to driver %s\n",
+			udc_controller->gadget.name, driver->driver.name);
+
+out:
+	if (retval)
+		printk(KERN_WARNING "gadget driver register failed %d\n",
+		       retval);
+	return retval;
+}
+
+/* Disconnect from gadget driver */
+static int fsl_stop(struct usb_gadget_driver *driver)
+{
+	struct fsl_ep *loop_ep;
+	unsigned long flags;
+
+	if (!udc_controller)
+		return -ENODEV;
+
+	if (!driver || driver != udc_controller->driver || !driver->unbind)
+		return -EINVAL;
+
+	if (udc_controller->transceiver)
+		otg_set_peripheral(udc_controller->transceiver->otg, NULL);
+
+	/* stop DR, disable intr */
+	dr_controller_stop(udc_controller);
+
+	/* in fact, not needed */
+	udc_controller->usb_state = USB_STATE_ATTACHED;
+	udc_controller->ep0_state = WAIT_FOR_SETUP;
+	udc_controller->ep0_dir = 0;
+
+	/* standard operation */
+	spin_lock_irqsave(&udc_controller->lock, flags);
+	udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
+	nuke(&udc_controller->eps[0], -ESHUTDOWN);
+	list_for_each_entry(loop_ep, &udc_controller->gadget.ep_list,
+			ep.ep_list)
+		nuke(loop_ep, -ESHUTDOWN);
+	spin_unlock_irqrestore(&udc_controller->lock, flags);
+
+	/* report disconnect; the controller is already quiesced */
+	driver->disconnect(&udc_controller->gadget);
+
+	/* unbind gadget and unhook driver. */
+	driver->unbind(&udc_controller->gadget);
+	udc_controller->gadget.dev.driver = NULL;
+	udc_controller->driver = NULL;
+
+	printk(KERN_WARNING "unregistered gadget driver '%s'\n",
+	       driver->driver.name);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------
+		PROC File System Support
+-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+#include <linux/seq_file.h>
+
+static const char proc_filename[] = "driver/fsl_usb2_udc";
+
+static int fsl_proc_read(char *page, char **start, off_t off, int count,
+		int *eof, void *_dev)
+{
+	char *buf = page;
+	char *next = buf;
+	unsigned size = count;
+	unsigned long flags;
+	int t, i;
+	u32 tmp_reg;
+	struct fsl_ep *ep = NULL;
+	struct fsl_req *req;
+
+	struct fsl_udc *udc = udc_controller;
+	if (off != 0)
+		return 0;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* ------basic driver information ---- */
+	t = scnprintf(next, size,
+			DRIVER_DESC "\n"
+			"%s version: %s\n"
+			"Gadget driver: %s\n\n",
+			driver_name, DRIVER_VERSION,
+			udc->driver ? udc->driver->driver.name : "(none)");
+	size -= t;
+	next += t;
+
+	/* ------ DR Registers ----- */
+	tmp_reg = fsl_readl(&dr_regs->usbcmd);
+	t = scnprintf(next, size,
+			"USBCMD reg:\n"
+			"SetupTW: %d\n"
+			"Run/Stop: %s\n\n",
+			(tmp_reg & USB_CMD_SUTW) ? 1 : 0,
+			(tmp_reg & USB_CMD_RUN_STOP) ? "Run" : "Stop");
+	size -= t;
+	next += t;
+
+	tmp_reg = fsl_readl(&dr_regs->usbsts);
+	t = scnprintf(next, size,
+			"USB Status Reg:\n"
+			"Dr Suspend: %d Reset Received: %d System Error: %s "
+			"USB Error Interrupt: %s\n\n",
+			(tmp_reg & USB_STS_SUSPEND) ? 1 : 0,
+			(tmp_reg & USB_STS_RESET) ? 1 : 0,
+			(tmp_reg & USB_STS_SYS_ERR) ? "Err" : "Normal",
+			(tmp_reg & USB_STS_ERR) ? "Err detected" : "No err");
+	size -= t;
+	next += t;
+
+	tmp_reg = fsl_readl(&dr_regs->usbintr);
+	t = scnprintf(next, size,
+			"USB Interrupt Enable Reg:\n"
+			"Sleep Enable: %d SOF Received Enable: %d "
+			"Reset Enable: %d\n"
+			"System Error Enable: %d "
+			"Port Change Detected Enable: %d\n"
+			"USB Error Intr Enable: %d USB Intr Enable: %d\n\n",
+			(tmp_reg & USB_INTR_DEVICE_SUSPEND) ? 1 : 0,
+			(tmp_reg & USB_INTR_SOF_EN) ? 1 : 0,
+			(tmp_reg & USB_INTR_RESET_EN) ? 1 : 0,
+			(tmp_reg & USB_INTR_SYS_ERR_EN) ? 1 : 0,
+			(tmp_reg & USB_INTR_PTC_DETECT_EN) ? 1 : 0,
+			(tmp_reg & USB_INTR_ERR_INT_EN) ? 1 : 0,
+			(tmp_reg & USB_INTR_INT_EN) ? 1 : 0);
+	size -= t;
+	next += t;
+
+	tmp_reg = fsl_readl(&dr_regs->frindex);
+	t = scnprintf(next, size,
+			"USB Frame Index Reg: Frame Number is 0x%x\n\n",
+			(tmp_reg & USB_FRINDEX_MASKS));
+	size -= t;
+	next += t;
+
+	tmp_reg = fsl_readl(&dr_regs->deviceaddr);
+	t = scnprintf(next, size,
+			"USB Device Address Reg: Device Addr is 0x%x\n\n",
+			(tmp_reg & USB_DEVICE_ADDRESS_MASK));
+	size -= t;
+	next += t;
+
+	tmp_reg = fsl_readl(&dr_regs->endpointlistaddr);
+	t = scnprintf(next, size,
+			"USB Endpoint List Address Reg: "
+			"Endpoint List Addr is 0x%x\n\n",
+			(tmp_reg & USB_EP_LIST_ADDRESS_MASK));
+	size -= t;
+	next += t;
+
+	tmp_reg = fsl_readl(&dr_regs->portsc1);
+	t = scnprintf(next, size,
+		"USB Port Status&Control Reg:\n"
+		"Port Transceiver Type : %s Port Speed: %s\n"
+		"PHY Low Power Suspend: %s Port Reset: %s "
+		"Port Suspend Mode: %s\n"
+		"Over-current Change: %s "
+		"Port Enable/Disable Change: %s\n"
+		"Port Enabled/Disabled: %s "
+		"Current Connect Status: %s\n\n", ( {
+			char *s;
+			switch (tmp_reg & PORTSCX_PTS_FSLS) {
+			case PORTSCX_PTS_UTMI:
+				s = "UTMI"; break;
+			case PORTSCX_PTS_ULPI:
+				s = "ULPI "; break;
+			case PORTSCX_PTS_FSLS:
+				s = "FS/LS Serial"; break;
+			default:
+				s = "None"; break;
+			}
+			s;} ),
+		usb_speed_string(portscx_device_speed(tmp_reg)),
+		(tmp_reg & PORTSCX_PHY_LOW_POWER_SPD) ?
+		"Normal PHY mode" : "Low power mode",
+		(tmp_reg & PORTSCX_PORT_RESET) ? "In Reset" :
+		"Not in Reset",
+		(tmp_reg & PORTSCX_PORT_SUSPEND) ? "In " : "Not in",
+		(tmp_reg & PORTSCX_OVER_CURRENT_CHG) ? "Detected" :
+		"No",
+		(tmp_reg & PORTSCX_PORT_EN_DIS_CHANGE) ? "Changed" :
+		"Not changed",
+		(tmp_reg & PORTSCX_PORT_ENABLE) ? "Enabled" :
+		"Disabled",
+		(tmp_reg & PORTSCX_CURRENT_CONNECT_STATUS) ?
+		"Attached" : "Not Attached");
+	size -= t;
+	next += t;
+
+	tmp_reg = fsl_readl(&dr_regs->usbmode);
+	t = scnprintf(next, size,
+			"USB Mode Reg: Controller Mode is: %s\n\n", ( {
+				char *s;
+				switch (tmp_reg & USB_MODE_CTRL_MODE_HOST) {
+				case USB_MODE_CTRL_MODE_IDLE:
+					s = "Idle"; break;
+				case USB_MODE_CTRL_MODE_DEVICE:
+					s = "Device Controller"; break;
+				case USB_MODE_CTRL_MODE_HOST:
+					s = "Host Controller"; break;
+				default:
+					s = "None"; break;
+				}
+				s;
+			} ));
+	size -= t;
+	next += t;
+
+	tmp_reg = fsl_readl(&dr_regs->endptsetupstat);
+	t = scnprintf(next, size,
+			"Endpoint Setup Status Reg: SETUP on ep 0x%x\n\n",
+			(tmp_reg & EP_SETUP_STATUS_MASK));
+	size -= t;
+	next += t;
+
+	for (i = 0; i < udc->max_ep / 2; i++) {
+		tmp_reg = fsl_readl(&dr_regs->endptctrl[i]);
+		t = scnprintf(next, size, "EP Ctrl Reg [0x%x]: = [0x%x]\n",
+				i, tmp_reg);
+		size -= t;
+		next += t;
+	}
+	tmp_reg = fsl_readl(&dr_regs->endpointprime);
+	t = scnprintf(next, size, "EP Prime Reg = [0x%x]\n\n", tmp_reg);
+	size -= t;
+	next += t;
+
+#ifndef CONFIG_ARCH_MXC
+	if (udc->pdata->have_sysif_regs) {
+		tmp_reg = usb_sys_regs->snoop1;
+		t = scnprintf(next, size, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
+		size -= t;
+		next += t;
+
+		tmp_reg = usb_sys_regs->control;
+		t = scnprintf(next, size, "General Control Reg : = [0x%x]\n\n",
+				tmp_reg);
+		size -= t;
+		next += t;
+	}
+#endif
+
+	/* ------fsl_udc, fsl_ep, fsl_request structure information ----- */
+	ep = &udc->eps[0];
+	t = scnprintf(next, size, "For %s Maxpkt is 0x%x index is 0x%x\n",
+			ep->ep.name, ep_maxpacket(ep), ep_index(ep));
+	size -= t;
+	next += t;
+
+	if (list_empty(&ep->queue)) {
+		t = scnprintf(next, size, "its req queue is empty\n\n");
+		size -= t;
+		next += t;
+	} else {
+		list_for_each_entry(req, &ep->queue, queue) {
+			t = scnprintf(next, size,
+				"req %p actual 0x%x length 0x%x buf %p\n",
+				&req->req, req->req.actual,
+				req->req.length, req->req.buf);
+			size -= t;
+			next += t;
+		}
+	}
+	/* other gadget->eplist ep */
+	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
+		if (ep->desc) {
+			t = scnprintf(next, size,
+					"\nFor %s Maxpkt is 0x%x "
+					"index is 0x%x\n",
+					ep->ep.name, ep_maxpacket(ep),
+					ep_index(ep));
+			size -= t;
+			next += t;
+
+			if (list_empty(&ep->queue)) {
+				t = scnprintf(next, size,
+						"its req queue is empty\n\n");
+				size -= t;
+				next += t;
+			} else {
+				list_for_each_entry(req, &ep->queue, queue) {
+					t = scnprintf(next, size,
+						"req %p actual 0x%x length "
+						"0x%x  buf %p\n",
+						&req->req, req->req.actual,
+						req->req.length, req->req.buf);
+					size -= t;
+					next += t;
+				}	/* end list_for_each_entry of ep reqs */
+			}	/* end else */
+		}	/* end if (ep->desc) */
+	}	/* end list_for_each_entry of gadget eps */
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	*eof = 1;
+	return count - size;
+}
+
+#define create_proc_file()	create_proc_read_entry(proc_filename, \
+				0, NULL, fsl_proc_read, NULL)
+
+#define remove_proc_file()	remove_proc_entry(proc_filename, NULL)
+
+#else				/* !CONFIG_USB_GADGET_DEBUG_FILES */
+
+#define create_proc_file()	do {} while (0)
+#define remove_proc_file()	do {} while (0)
+
+#endif				/* CONFIG_USB_GADGET_DEBUG_FILES */
+
+/*-------------------------------------------------------------------------*/
+
+/* Release udc structures */
+static void fsl_udc_release(struct device *dev)
+{
+	complete(udc_controller->done);
+	dma_free_coherent(dev->parent, udc_controller->ep_qh_size,
+			udc_controller->ep_qh, udc_controller->ep_qh_dma);
+	kfree(udc_controller);
+}
+
+/******************************************************************
+	Internal structure setup functions
+*******************************************************************/
+/*------------------------------------------------------------------
+ * init resources for the global controller
+ * Return the udc handle on success or NULL on failure
+ ------------------------------------------------------------------*/
+static int __init struct_udc_setup(struct fsl_udc *udc,
+		struct platform_device *pdev)
+{
+	struct fsl_usb2_platform_data *pdata;
+	size_t size;
+
+	pdata = pdev->dev.platform_data;
+	udc->phy_mode = pdata->phy_mode;
+
+	udc->eps = kzalloc(sizeof(struct fsl_ep) * udc->max_ep, GFP_KERNEL);
+	if (!udc->eps) {
+		ERR("malloc fsl_ep failed\n");
+		return -1;
+	}
+
+	/* initialize QHs, taking care of alignment */
+	size = udc->max_ep * sizeof(struct ep_queue_head);
+	if (size < QH_ALIGNMENT)
+		size = QH_ALIGNMENT;
+	else if ((size % QH_ALIGNMENT) != 0) {
+		size += QH_ALIGNMENT + 1;
+		size &= ~(QH_ALIGNMENT - 1);
+	}
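+	/*
+	 * Illustrative sizing, assuming 64-byte queue heads and a 2 KiB
+	 * QH_ALIGNMENT: a controller with 6 endpoints (max_ep = 12) needs
+	 * 768 bytes, which is below QH_ALIGNMENT, so a full 2 KiB block is
+	 * allocated above.
+	 */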
+	udc->ep_qh = dma_alloc_coherent(&pdev->dev, size,
+					&udc->ep_qh_dma, GFP_KERNEL);
+	if (!udc->ep_qh) {
+		ERR("malloc QHs for udc failed\n");
+		kfree(udc->eps);
+		return -1;
+	}
+
+	udc->ep_qh_size = size;
+
+	/* Initialize ep0 status request structure */
+	/* FIXME: fsl_alloc_request() ignores ep argument */
+	udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
+			struct fsl_req, req);
+	/* allocate a small amount of memory to get valid address */
+	udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
+
+	udc->resume_state = USB_STATE_NOTATTACHED;
+	udc->usb_state = USB_STATE_POWERED;
+	udc->ep0_dir = 0;
+	udc->remote_wakeup = 0;	/* default to 0 on reset */
+
+	return 0;
+}
+
+/*----------------------------------------------------------------
+ * Set up the fsl_ep struct for each ep
+ * Link fsl_ep->ep into gadget->ep_list
+ * ep0out is not used, so nothing is done for it here
+ * ep0in must still be taken care of
+ *--------------------------------------------------------------*/
+static int __init struct_ep_setup(struct fsl_udc *udc, unsigned char index,
+		char *name, int link)
+{
+	struct fsl_ep *ep = &udc->eps[index];
+
+	ep->udc = udc;
+	strcpy(ep->name, name);
+	ep->ep.name = ep->name;
+
+	ep->ep.ops = &fsl_ep_ops;
+	ep->stopped = 0;
+
+	/* for ep0: maxpacket comes from the descriptor;
+	 * for other eps, maxpacket is set by usb_ep_autoconfig() in the
+	 * gadget layer
+	 */
+	ep->ep.maxpacket = (unsigned short) ~0;
+
+	/* the queue lists any req for this ep */
+	INIT_LIST_HEAD(&ep->queue);
+
+	/* gadget.ep_list is used by usb_ep_autoconfig(), so ep0 is not added */
+	if (link)
+		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+	ep->gadget = &udc->gadget;
+	ep->qh = &udc->ep_qh[index];
+
+	return 0;
+}
+
+/* Driver probe function
+ * All initialization is done here except enabling the usb_intr register;
+ * board setup should have been done in the platform code.
+ */
+static int __init fsl_udc_probe(struct platform_device *pdev)
+{
+	struct fsl_usb2_platform_data *pdata;
+	struct resource *res;
+	int ret = -ENODEV;
+	unsigned int i;
+	u32 dccparams;
+
+	if (strcmp(pdev->name, driver_name)) {
+		VDBG("Wrong device");
+		return -ENODEV;
+	}
+
+	udc_controller = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL);
+	if (udc_controller == NULL) {
+		ERR("malloc udc failed\n");
+		return -ENOMEM;
+	}
+
+	pdata = pdev->dev.platform_data;
+	udc_controller->pdata = pdata;
+	spin_lock_init(&udc_controller->lock);
+	udc_controller->stopped = 1;
+
+#ifdef CONFIG_USB_OTG
+	if (pdata->operating_mode == FSL_USB2_DR_OTG) {
+		udc_controller->transceiver = usb_get_transceiver();
+		if (!udc_controller->transceiver) {
+			ERR("Can't find OTG driver!\n");
+			ret = -ENODEV;
+			goto err_kfree;
+		}
+	}
+#endif
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENXIO;
+		goto err_kfree;
+	}
+
+	if (pdata->operating_mode == FSL_USB2_DR_DEVICE) {
+		if (!request_mem_region(res->start, resource_size(res),
+					driver_name)) {
+			ERR("request mem region for %s failed\n", pdev->name);
+			ret = -EBUSY;
+			goto err_kfree;
+		}
+	}
+
+	dr_regs = ioremap(res->start, resource_size(res));
+	if (!dr_regs) {
+		ret = -ENOMEM;
+		goto err_release_mem_region;
+	}
+
+	pdata->regs = (void *)dr_regs;
+
+	/*
+	 * do platform specific init: check the clock, grab/config pins, etc.
+	 */
+	if (pdata->init && pdata->init(pdev)) {
+		ret = -ENODEV;
+		goto err_iounmap_noclk;
+	}
+
+	/* Set accessors only after pdata->init() ! */
+	fsl_set_accessors(pdata);
+
+#ifndef CONFIG_ARCH_MXC
+	if (pdata->have_sysif_regs)
+		usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET;
+#endif
+
+	/* Initialize USB clocks */
+	ret = fsl_udc_clk_init(pdev);
+	if (ret < 0)
+		goto err_iounmap_noclk;
+
+	/* Read Device Controller Capability Parameters register */
+	dccparams = fsl_readl(&dr_regs->dccparams);
+	if (!(dccparams & DCCPARAMS_DC)) {
+		ERR("This SOC doesn't support device role\n");
+		ret = -ENODEV;
+		goto err_iounmap;
+	}
+	/* Get max device endpoints */
+	/* DEN is the number of bidirectional endpoints; max_ep counts each
+	 * direction separately */
+	udc_controller->max_ep = (dccparams & DCCPARAMS_DEN_MASK) * 2;
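+	/*
+	 * Example: a DEN value of 4 means four bidirectional endpoints, so
+	 * max_ep becomes 8 and udc->eps[] gets one slot per direction
+	 * (ep0 plus ep1-ep3 in both directions, set up further below).
+	 */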
+
+	udc_controller->irq = platform_get_irq(pdev, 0);
+	if (!udc_controller->irq) {
+		ret = -ENODEV;
+		goto err_iounmap;
+	}
+
+	ret = request_irq(udc_controller->irq, fsl_udc_irq, IRQF_SHARED,
+			driver_name, udc_controller);
+	if (ret != 0) {
+		ERR("cannot request irq %d err %d\n",
+				udc_controller->irq, ret);
+		goto err_iounmap;
+	}
+
+	/* Initialize the udc structure, including its QHs and other members */
+	if (struct_udc_setup(udc_controller, pdev)) {
+		ERR("Can't initialize udc data structure\n");
+		ret = -ENOMEM;
+		goto err_free_irq;
+	}
+
+	if (!udc_controller->transceiver) {
+		/* initialize the USB hardware registers except the per-EP
+		 * registers; leave the usbintr register untouched */
+		dr_controller_setup(udc_controller);
+	}
+
+	fsl_udc_clk_finalize(pdev);
+
+	/* Setup gadget structure */
+	udc_controller->gadget.ops = &fsl_gadget_ops;
+	udc_controller->gadget.max_speed = USB_SPEED_HIGH;
+	udc_controller->gadget.ep0 = &udc_controller->eps[0].ep;
+	INIT_LIST_HEAD(&udc_controller->gadget.ep_list);
+	udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
+	udc_controller->gadget.name = driver_name;
+
+	/* Setup gadget.dev and register with kernel */
+	dev_set_name(&udc_controller->gadget.dev, "gadget");
+	udc_controller->gadget.dev.release = fsl_udc_release;
+	udc_controller->gadget.dev.parent = &pdev->dev;
+	ret = device_register(&udc_controller->gadget.dev);
+	if (ret < 0)
+		goto err_free_irq;
+
+	if (udc_controller->transceiver)
+		udc_controller->gadget.is_otg = 1;
+
+	/* setup QH and epctrl for ep0 */
+	ep0_setup(udc_controller);
+
+	/* setup udc->eps[] for ep0 */
+	struct_ep_setup(udc_controller, 0, "ep0", 0);
+	/* for ep0: the descriptor is assigned here;
+	 * for other eps, the gadget layer calls ep_enable() with a descriptor
+	 */
+	udc_controller->eps[0].desc = &fsl_ep0_desc;
+	udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD;
+
+	/* setup the udc->eps[] for non-control endpoints and link
+	 * to gadget.ep_list */
+	for (i = 1; i < (int)(udc_controller->max_ep / 2); i++) {
+		char name[14];
+
+		sprintf(name, "ep%dout", i);
+		struct_ep_setup(udc_controller, i * 2, name, 1);
+		sprintf(name, "ep%din", i);
+		struct_ep_setup(udc_controller, i * 2 + 1, name, 1);
+	}
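+	/*
+	 * Resulting udc->eps[] layout: index = ep number * 2 + direction
+	 * (0 = OUT, 1 = IN), e.g. eps[2] is "ep1out" and eps[3] is "ep1in";
+	 * eps[0] is the single ep0 structure and eps[1] stays unused.
+	 */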
+
+	/* use dma_pool for TD management */
+	udc_controller->td_pool = dma_pool_create("udc_td", &pdev->dev,
+			sizeof(struct ep_td_struct),
+			DTD_ALIGNMENT, UDC_DMA_BOUNDARY);
+	if (udc_controller->td_pool == NULL) {
+		ret = -ENOMEM;
+		goto err_unregister;
+	}
+
+	ret = usb_add_gadget_udc(&pdev->dev, &udc_controller->gadget);
+	if (ret)
+		goto err_del_udc;
+
+	create_proc_file();
+	return 0;
+
+err_del_udc:
+	dma_pool_destroy(udc_controller->td_pool);
+err_unregister:
+	device_unregister(&udc_controller->gadget.dev);
+err_free_irq:
+	free_irq(udc_controller->irq, udc_controller);
+err_iounmap:
+	if (pdata->exit)
+		pdata->exit(pdev);
+	fsl_udc_clk_release();
+err_iounmap_noclk:
+	iounmap(dr_regs);
+err_release_mem_region:
+	if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
+		release_mem_region(res->start, resource_size(res));
+err_kfree:
+	kfree(udc_controller);
+	udc_controller = NULL;
+	return ret;
+}
+
+/* Driver removal function
+ * Free resources and finish pending transactions
+ */
+static int __exit fsl_udc_remove(struct platform_device *pdev)
+{
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+
+	DECLARE_COMPLETION(done);
+
+	if (!udc_controller)
+		return -ENODEV;
+
+	usb_del_gadget_udc(&udc_controller->gadget);
+	udc_controller->done = &done;
+
+	fsl_udc_clk_release();
+
+	/* DR has been stopped in usb_gadget_unregister_driver() */
+	remove_proc_file();
+
+	/* Free allocated memory */
+	kfree(udc_controller->status_req->req.buf);
+	kfree(udc_controller->status_req);
+	kfree(udc_controller->eps);
+
+	dma_pool_destroy(udc_controller->td_pool);
+	free_irq(udc_controller->irq, udc_controller);
+	iounmap(dr_regs);
+	if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
+		release_mem_region(res->start, resource_size(res));
+
+	device_unregister(&udc_controller->gadget.dev);
+	/* free udc -- wait for release() to finish */
+	wait_for_completion(&done);
+
+	/*
+	 * do platform specific un-initialization:
+	 * release iomux pins, etc.
+	 */
+	if (pdata->exit)
+		pdata->exit(pdev);
+
+	return 0;
+}
+
+/*-----------------------------------------------------------------
+ * Modify power management attributes
+ * Used by the OTG state machine to disable the gadget temporarily
+ -----------------------------------------------------------------*/
+static int fsl_udc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	dr_controller_stop(udc_controller);
+	return 0;
+}
+
+/*-----------------------------------------------------------------
+ * Invoked on USB resume. May be called in interrupt context.
+ * Here we start the DR controller and enable the irq
+ *-----------------------------------------------------------------*/
+static int fsl_udc_resume(struct platform_device *pdev)
+{
+	/* Enable DR irq reg and set controller Run */
+	if (udc_controller->stopped) {
+		dr_controller_setup(udc_controller);
+		dr_controller_run(udc_controller);
+	}
+	udc_controller->usb_state = USB_STATE_ATTACHED;
+	udc_controller->ep0_state = WAIT_FOR_SETUP;
+	udc_controller->ep0_dir = 0;
+	return 0;
+}
+
+static int fsl_udc_otg_suspend(struct device *dev, pm_message_t state)
+{
+	struct fsl_udc *udc = udc_controller;
+	u32 mode, usbcmd;
+
+	mode = fsl_readl(&dr_regs->usbmode) & USB_MODE_CTRL_MODE_MASK;
+
+	pr_debug("%s(): mode 0x%x stopped %d\n", __func__, mode, udc->stopped);
+
+	/*
+	 * If the controller is already stopped, then this must be a
+	 * PM suspend.  Remember this fact, so that we will leave the
+	 * controller stopped at PM resume time.
+	 */
+	if (udc->stopped) {
+		pr_debug("gadget already stopped, leaving early\n");
+		udc->already_stopped = 1;
+		return 0;
+	}
+
+	if (mode != USB_MODE_CTRL_MODE_DEVICE) {
+		pr_debug("gadget not in device mode, leaving early\n");
+		return 0;
+	}
+
+	/* stop the controller */
+	usbcmd = fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP;
+	fsl_writel(usbcmd, &dr_regs->usbcmd);
+
+	udc->stopped = 1;
+
+	pr_info("USB Gadget suspended\n");
+
+	return 0;
+}
+
+static int fsl_udc_otg_resume(struct device *dev)
+{
+	pr_debug("%s(): stopped %d  already_stopped %d\n", __func__,
+		 udc_controller->stopped, udc_controller->already_stopped);
+
+	/*
+	 * If the controller was stopped at suspend time, then
+	 * don't resume it now.
+	 */
+	if (udc_controller->already_stopped) {
+		udc_controller->already_stopped = 0;
+		pr_debug("gadget was already stopped, leaving early\n");
+		return 0;
+	}
+
+	pr_info("USB Gadget resume\n");
+
+	return fsl_udc_resume(NULL);
+}
+
+/*-------------------------------------------------------------------------
+	Register entry point for the peripheral controller driver
+--------------------------------------------------------------------------*/
+
+static struct platform_driver udc_driver = {
+	.remove  = __exit_p(fsl_udc_remove),
+	/* these suspend and resume are not usb suspend and resume */
+	.suspend = fsl_udc_suspend,
+	.resume  = fsl_udc_resume,
+	.driver  = {
+		.name = (char *)driver_name,
+		.owner = THIS_MODULE,
+		/* udc suspend/resume called from OTG driver */
+		.suspend = fsl_udc_otg_suspend,
+		.resume  = fsl_udc_otg_resume,
+	},
+};
+
+static int __init udc_init(void)
+{
+	printk(KERN_INFO "%s (%s)\n", driver_desc, DRIVER_VERSION);
+	return platform_driver_probe(&udc_driver, fsl_udc_probe);
+}
+
+module_init(udc_init);
+
+static void __exit udc_exit(void)
+{
+	platform_driver_unregister(&udc_driver);
+	printk(KERN_WARNING "%s unregistered\n", driver_desc);
+}
+
+module_exit(udc_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:fsl-usb2-udc");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_usb2_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_usb2_udc.h
new file mode 100644
index 0000000..e651469
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fsl_usb2_udc.h
@@ -0,0 +1,600 @@
+/*
+ * Freescale USB device/endpoint management registers
+ */
+#ifndef __FSL_USB2_UDC_H
+#define __FSL_USB2_UDC_H
+
+/* ### define USB registers here
+ */
+#define USB_MAX_CTRL_PAYLOAD		64
+#define USB_DR_SYS_OFFSET		0x400
+
+ /* USB DR device mode registers (Little Endian) */
+struct usb_dr_device {
+	/* Capability register */
+	u8 res1[256];
+	u16 caplength;		/* Capability Register Length */
+	u16 hciversion;		/* Host Controller Interface Version */
+	u32 hcsparams;		/* Host Controller Structural Parameters */
+	u32 hccparams;		/* Host Controller Capability Parameters */
+	u8 res2[20];
+	u32 dciversion;		/* Device Controller Interface Version */
+	u32 dccparams;		/* Device Controller Capability Parameters */
+	u8 res3[24];
+	/* Operation register */
+	u32 usbcmd;		/* USB Command Register */
+	u32 usbsts;		/* USB Status Register */
+	u32 usbintr;		/* USB Interrupt Enable Register */
+	u32 frindex;		/* Frame Index Register */
+	u8 res4[4];
+	u32 deviceaddr;		/* Device Address */
+	u32 endpointlistaddr;	/* Endpoint List Address Register */
+	u8 res5[4];
+	u32 burstsize;		/* Master Interface Data Burst Size Register */
+	u32 txttfilltuning;	/* Transmit FIFO Tuning Controls Register */
+	u8 res6[24];
+	u32 configflag;		/* Configure Flag Register */
+	u32 portsc1;		/* Port 1 Status and Control Register */
+	u8 res7[28];
+	u32 otgsc;		/* On-The-Go Status and Control */
+	u32 usbmode;		/* USB Mode Register */
+	u32 endptsetupstat;	/* Endpoint Setup Status Register */
+	u32 endpointprime;	/* Endpoint Initialization Register */
+	u32 endptflush;		/* Endpoint Flush Register */
+	u32 endptstatus;	/* Endpoint Status Register */
+	u32 endptcomplete;	/* Endpoint Complete Register */
+	u32 endptctrl[6];	/* Endpoint Control Registers */
+};
+
+ /* USB DR host mode registers (Little Endian) */
+struct usb_dr_host {
+	/* Capability register */
+	u8 res1[256];
+	u16 caplength;		/* Capability Register Length */
+	u16 hciversion;		/* Host Controller Interface Version */
+	u32 hcsparams;		/* Host Controller Structural Parameters */
+	u32 hccparams;		/* Host Controller Capability Parameters */
+	u8 res2[20];
+	u32 dciversion;		/* Device Controller Interface Version */
+	u32 dccparams;		/* Device Controller Capability Parameters */
+	u8 res3[24];
+	/* Operation register */
+	u32 usbcmd;		/* USB Command Register */
+	u32 usbsts;		/* USB Status Register */
+	u32 usbintr;		/* USB Interrupt Enable Register */
+	u32 frindex;		/* Frame Index Register */
+	u8 res4[4];
+	u32 periodiclistbase;	/* Periodic Frame List Base Address Register */
+	u32 asynclistaddr;	/* Current Asynchronous List Address Register */
+	u8 res5[4];
+	u32 burstsize;		/* Master Interface Data Burst Size Register */
+	u32 txttfilltuning;	/* Transmit FIFO Tuning Controls Register */
+	u8 res6[24];
+	u32 configflag;		/* Configure Flag Register */
+	u32 portsc1;		/* Port 1 Status and Control Register */
+	u8 res7[28];
+	u32 otgsc;		/* On-The-Go Status and Control */
+	u32 usbmode;		/* USB Mode Register */
+	u32 endptsetupstat;	/* Endpoint Setup Status Register */
+	u32 endpointprime;	/* Endpoint Initialization Register */
+	u32 endptflush;		/* Endpoint Flush Register */
+	u32 endptstatus;	/* Endpoint Status Register */
+	u32 endptcomplete;	/* Endpoint Complete Register */
+	u32 endptctrl[6];	/* Endpoint Control Registers */
+};
+
+ /* non-EHCI USB system interface registers (Big Endian) */
+struct usb_sys_interface {
+	u32 snoop1;
+	u32 snoop2;
+	u32 age_cnt_thresh;	/* Age Count Threshold Register */
+	u32 pri_ctrl;		/* Priority Control Register */
+	u32 si_ctrl;		/* System Interface Control Register */
+	u8 res[236];
+	u32 control;		/* General Purpose Control Register */
+};
+
+/* ep0 transfer state */
+#define WAIT_FOR_SETUP          0
+#define DATA_STATE_XMIT         1
+#define DATA_STATE_NEED_ZLP     2
+#define WAIT_FOR_OUT_STATUS     3
+#define DATA_STATE_RECV         4
+
+/* Device Controller Capability Parameter register */
+#define DCCPARAMS_DC				0x00000080
+#define DCCPARAMS_DEN_MASK			0x0000001f
+
+/* Frame Index Register Bit Masks */
+#define	USB_FRINDEX_MASKS			0x3fff
+/* USB CMD  Register Bit Masks */
+#define  USB_CMD_RUN_STOP                     0x00000001
+#define  USB_CMD_CTRL_RESET                   0x00000002
+#define  USB_CMD_PERIODIC_SCHEDULE_EN         0x00000010
+#define  USB_CMD_ASYNC_SCHEDULE_EN            0x00000020
+#define  USB_CMD_INT_AA_DOORBELL              0x00000040
+#define  USB_CMD_ASP                          0x00000300
+#define  USB_CMD_ASYNC_SCH_PARK_EN            0x00000800
+#define  USB_CMD_SUTW                         0x00002000
+#define  USB_CMD_ATDTW                        0x00004000
+#define  USB_CMD_ITC                          0x00FF0000
+
+/* bit 15,3,2 are frame list size */
+#define  USB_CMD_FRAME_SIZE_1024              0x00000000
+#define  USB_CMD_FRAME_SIZE_512               0x00000004
+#define  USB_CMD_FRAME_SIZE_256               0x00000008
+#define  USB_CMD_FRAME_SIZE_128               0x0000000C
+#define  USB_CMD_FRAME_SIZE_64                0x00008000
+#define  USB_CMD_FRAME_SIZE_32                0x00008004
+#define  USB_CMD_FRAME_SIZE_16                0x00008008
+#define  USB_CMD_FRAME_SIZE_8                 0x0000800C
+
+/* bit 9-8 are async schedule park mode count */
+#define  USB_CMD_ASP_00                       0x00000000
+#define  USB_CMD_ASP_01                       0x00000100
+#define  USB_CMD_ASP_10                       0x00000200
+#define  USB_CMD_ASP_11                       0x00000300
+#define  USB_CMD_ASP_BIT_POS                  8
+
+/* bit 23-16 are interrupt threshold control */
+#define  USB_CMD_ITC_NO_THRESHOLD             0x00000000
+#define  USB_CMD_ITC_1_MICRO_FRM              0x00010000
+#define  USB_CMD_ITC_2_MICRO_FRM              0x00020000
+#define  USB_CMD_ITC_4_MICRO_FRM              0x00040000
+#define  USB_CMD_ITC_8_MICRO_FRM              0x00080000
+#define  USB_CMD_ITC_16_MICRO_FRM             0x00100000
+#define  USB_CMD_ITC_32_MICRO_FRM             0x00200000
+#define  USB_CMD_ITC_64_MICRO_FRM             0x00400000
+#define  USB_CMD_ITC_BIT_POS                  16
+
+/* USB STS Register Bit Masks */
+#define  USB_STS_INT                          0x00000001
+#define  USB_STS_ERR                          0x00000002
+#define  USB_STS_PORT_CHANGE                  0x00000004
+#define  USB_STS_FRM_LST_ROLL                 0x00000008
+#define  USB_STS_SYS_ERR                      0x00000010
+#define  USB_STS_IAA                          0x00000020
+#define  USB_STS_RESET                        0x00000040
+#define  USB_STS_SOF                          0x00000080
+#define  USB_STS_SUSPEND                      0x00000100
+#define  USB_STS_HC_HALTED                    0x00001000
+#define  USB_STS_RCL                          0x00002000
+#define  USB_STS_PERIODIC_SCHEDULE            0x00004000
+#define  USB_STS_ASYNC_SCHEDULE               0x00008000
+
+/* USB INTR Register Bit Masks */
+#define  USB_INTR_INT_EN                      0x00000001
+#define  USB_INTR_ERR_INT_EN                  0x00000002
+#define  USB_INTR_PTC_DETECT_EN               0x00000004
+#define  USB_INTR_FRM_LST_ROLL_EN             0x00000008
+#define  USB_INTR_SYS_ERR_EN                  0x00000010
+#define  USB_INTR_ASYN_ADV_EN                 0x00000020
+#define  USB_INTR_RESET_EN                    0x00000040
+#define  USB_INTR_SOF_EN                      0x00000080
+#define  USB_INTR_DEVICE_SUSPEND              0x00000100
+
+/* Device Address bit masks */
+#define  USB_DEVICE_ADDRESS_MASK              0xFE000000
+#define  USB_DEVICE_ADDRESS_BIT_POS           25
+
+/* endpoint list address bit masks */
+#define USB_EP_LIST_ADDRESS_MASK              0xfffff800
+
+/* PORTSCX  Register Bit Masks */
+#define  PORTSCX_CURRENT_CONNECT_STATUS       0x00000001
+#define  PORTSCX_CONNECT_STATUS_CHANGE        0x00000002
+#define  PORTSCX_PORT_ENABLE                  0x00000004
+#define  PORTSCX_PORT_EN_DIS_CHANGE           0x00000008
+#define  PORTSCX_OVER_CURRENT_ACT             0x00000010
+#define  PORTSCX_OVER_CURRENT_CHG             0x00000020
+#define  PORTSCX_PORT_FORCE_RESUME            0x00000040
+#define  PORTSCX_PORT_SUSPEND                 0x00000080
+#define  PORTSCX_PORT_RESET                   0x00000100
+#define  PORTSCX_LINE_STATUS_BITS             0x00000C00
+#define  PORTSCX_PORT_POWER                   0x00001000
+#define  PORTSCX_PORT_INDICTOR_CTRL           0x0000C000
+#define  PORTSCX_PORT_TEST_CTRL               0x000F0000
+#define  PORTSCX_WAKE_ON_CONNECT_EN           0x00100000
+#define  PORTSCX_WAKE_ON_CONNECT_DIS          0x00200000
+#define  PORTSCX_WAKE_ON_OVER_CURRENT         0x00400000
+#define  PORTSCX_PHY_LOW_POWER_SPD            0x00800000
+#define  PORTSCX_PORT_FORCE_FULL_SPEED        0x01000000
+#define  PORTSCX_PORT_SPEED_MASK              0x0C000000
+#define  PORTSCX_PORT_WIDTH                   0x10000000
+#define  PORTSCX_PHY_TYPE_SEL                 0xC0000000
+
+/* bit 11-10 are line status */
+#define  PORTSCX_LINE_STATUS_SE0              0x00000000
+#define  PORTSCX_LINE_STATUS_JSTATE           0x00000400
+#define  PORTSCX_LINE_STATUS_KSTATE           0x00000800
+#define  PORTSCX_LINE_STATUS_UNDEF            0x00000C00
+#define  PORTSCX_LINE_STATUS_BIT_POS          10
+
+/* bit 15-14 are port indicator control */
+#define  PORTSCX_PIC_OFF                      0x00000000
+#define  PORTSCX_PIC_AMBER                    0x00004000
+#define  PORTSCX_PIC_GREEN                    0x00008000
+#define  PORTSCX_PIC_UNDEF                    0x0000C000
+#define  PORTSCX_PIC_BIT_POS                  14
+
+/* bit 19-16 are port test control */
+#define  PORTSCX_PTC_DISABLE                  0x00000000
+#define  PORTSCX_PTC_JSTATE                   0x00010000
+#define  PORTSCX_PTC_KSTATE                   0x00020000
+#define  PORTSCX_PTC_SEQNAK                   0x00030000
+#define  PORTSCX_PTC_PACKET                   0x00040000
+#define  PORTSCX_PTC_FORCE_EN                 0x00050000
+#define  PORTSCX_PTC_BIT_POS                  16
+
+/* bit 27-26 are port speed */
+#define  PORTSCX_PORT_SPEED_FULL              0x00000000
+#define  PORTSCX_PORT_SPEED_LOW               0x04000000
+#define  PORTSCX_PORT_SPEED_HIGH              0x08000000
+#define  PORTSCX_PORT_SPEED_UNDEF             0x0C000000
+#define  PORTSCX_SPEED_BIT_POS                26
+
+/* bit 28 is parallel transceiver width for UTMI interface */
+#define  PORTSCX_PTW                          0x10000000
+#define  PORTSCX_PTW_8BIT                     0x00000000
+#define  PORTSCX_PTW_16BIT                    0x10000000
+
+/* bit 31-30 are port transceiver select */
+#define  PORTSCX_PTS_UTMI                     0x00000000
+#define  PORTSCX_PTS_ULPI                     0x80000000
+#define  PORTSCX_PTS_FSLS                     0xC0000000
+#define  PORTSCX_PTS_BIT_POS                  30
+
+/* otgsc Register Bit Masks */
+#define  OTGSC_CTRL_VUSB_DISCHARGE            0x00000001
+#define  OTGSC_CTRL_VUSB_CHARGE               0x00000002
+#define  OTGSC_CTRL_OTG_TERM                  0x00000008
+#define  OTGSC_CTRL_DATA_PULSING              0x00000010
+#define  OTGSC_STS_USB_ID                     0x00000100
+#define  OTGSC_STS_A_VBUS_VALID               0x00000200
+#define  OTGSC_STS_A_SESSION_VALID            0x00000400
+#define  OTGSC_STS_B_SESSION_VALID            0x00000800
+#define  OTGSC_STS_B_SESSION_END              0x00001000
+#define  OTGSC_STS_1MS_TOGGLE                 0x00002000
+#define  OTGSC_STS_DATA_PULSING               0x00004000
+#define  OTGSC_INTSTS_USB_ID                  0x00010000
+#define  OTGSC_INTSTS_A_VBUS_VALID            0x00020000
+#define  OTGSC_INTSTS_A_SESSION_VALID         0x00040000
+#define  OTGSC_INTSTS_B_SESSION_VALID         0x00080000
+#define  OTGSC_INTSTS_B_SESSION_END           0x00100000
+#define  OTGSC_INTSTS_1MS                     0x00200000
+#define  OTGSC_INTSTS_DATA_PULSING            0x00400000
+#define  OTGSC_INTR_USB_ID                    0x01000000
+#define  OTGSC_INTR_A_VBUS_VALID              0x02000000
+#define  OTGSC_INTR_A_SESSION_VALID           0x04000000
+#define  OTGSC_INTR_B_SESSION_VALID           0x08000000
+#define  OTGSC_INTR_B_SESSION_END             0x10000000
+#define  OTGSC_INTR_1MS_TIMER                 0x20000000
+#define  OTGSC_INTR_DATA_PULSING              0x40000000
+
+/* USB MODE Register Bit Masks */
+#define  USB_MODE_CTRL_MODE_IDLE              0x00000000
+#define  USB_MODE_CTRL_MODE_DEVICE            0x00000002
+#define  USB_MODE_CTRL_MODE_HOST              0x00000003
+#define  USB_MODE_CTRL_MODE_MASK              0x00000003
+#define  USB_MODE_CTRL_MODE_RSV               0x00000001
+#define  USB_MODE_ES                          0x00000004 /* Endian Select */
+#define  USB_MODE_SETUP_LOCK_OFF              0x00000008
+#define  USB_MODE_STREAM_DISABLE              0x00000010
+/* Endpoint Flush Register */
+#define EPFLUSH_TX_OFFSET		      0x00010000
+#define EPFLUSH_RX_OFFSET		      0x00000000
+
+/* Endpoint Setup Status bit masks */
+#define  EP_SETUP_STATUS_MASK                 0x0000003F
+#define  EP_SETUP_STATUS_EP0		      0x00000001
+
+/* ENDPOINTCTRLx  Register Bit Masks */
+#define  EPCTRL_TX_ENABLE                     0x00800000
+#define  EPCTRL_TX_DATA_TOGGLE_RST            0x00400000	/* Not EP0 */
+#define  EPCTRL_TX_DATA_TOGGLE_INH            0x00200000	/* Not EP0 */
+#define  EPCTRL_TX_TYPE                       0x000C0000
+#define  EPCTRL_TX_DATA_SOURCE                0x00020000	/* Not EP0 */
+#define  EPCTRL_TX_EP_STALL                   0x00010000
+#define  EPCTRL_RX_ENABLE                     0x00000080
+#define  EPCTRL_RX_DATA_TOGGLE_RST            0x00000040	/* Not EP0 */
+#define  EPCTRL_RX_DATA_TOGGLE_INH            0x00000020	/* Not EP0 */
+#define  EPCTRL_RX_TYPE                       0x0000000C
+#define  EPCTRL_RX_DATA_SINK                  0x00000002	/* Not EP0 */
+#define  EPCTRL_RX_EP_STALL                   0x00000001
+
+/* bit 19-18 and 3-2 are endpoint type */
+#define  EPCTRL_EP_TYPE_CONTROL               0
+#define  EPCTRL_EP_TYPE_ISO                   1
+#define  EPCTRL_EP_TYPE_BULK                  2
+#define  EPCTRL_EP_TYPE_INTERRUPT             3
+#define  EPCTRL_TX_EP_TYPE_SHIFT              18
+#define  EPCTRL_RX_EP_TYPE_SHIFT              2
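+
+/* Illustrative ENDPOINTCTRLx value for a bulk IN endpoint (example only,
+ * not a definition used by the driver):
+ *
+ *	EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST |
+ *		(EPCTRL_EP_TYPE_BULK << EPCTRL_TX_EP_TYPE_SHIFT)
+ */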
+
+/* SNOOPn Register Bit Masks */
+#define  SNOOP_ADDRESS_MASK                   0xFFFFF000
+#define  SNOOP_SIZE_ZERO                      0x00	/* snooping disable */
+#define  SNOOP_SIZE_4KB                       0x0B	/* 4KB snoop size */
+#define  SNOOP_SIZE_8KB                       0x0C
+#define  SNOOP_SIZE_16KB                      0x0D
+#define  SNOOP_SIZE_32KB                      0x0E
+#define  SNOOP_SIZE_64KB                      0x0F
+#define  SNOOP_SIZE_128KB                     0x10
+#define  SNOOP_SIZE_256KB                     0x11
+#define  SNOOP_SIZE_512KB                     0x12
+#define  SNOOP_SIZE_1MB                       0x13
+#define  SNOOP_SIZE_2MB                       0x14
+#define  SNOOP_SIZE_4MB                       0x15
+#define  SNOOP_SIZE_8MB                       0x16
+#define  SNOOP_SIZE_16MB                      0x17
+#define  SNOOP_SIZE_32MB                      0x18
+#define  SNOOP_SIZE_64MB                      0x19
+#define  SNOOP_SIZE_128MB                     0x1A
+#define  SNOOP_SIZE_256MB                     0x1B
+#define  SNOOP_SIZE_512MB                     0x1C
+#define  SNOOP_SIZE_1GB                       0x1D
+#define  SNOOP_SIZE_2GB                       0x1E	/* 2GB snoop size */
+
+/* pri_ctrl Register Bit Masks */
+#define  PRI_CTRL_PRI_LVL1                    0x0000000C
+#define  PRI_CTRL_PRI_LVL0                    0x00000003
+
+/* si_ctrl Register Bit Masks */
+#define  SI_CTRL_ERR_DISABLE                  0x00000010
+#define  SI_CTRL_IDRC_DISABLE                 0x00000008
+#define  SI_CTRL_RD_SAFE_EN                   0x00000004
+#define  SI_CTRL_RD_PREFETCH_DISABLE          0x00000002
+#define  SI_CTRL_RD_PREFEFETCH_VAL            0x00000001
+
+/* control Register Bit Masks */
+#define  USB_CTRL_IOENB                       0x00000004
+#define  USB_CTRL_ULPI_INT0EN                 0x00000001
+
+/* Endpoint Queue Head data struct
+ * Note: all qh fields are little-endian, and NEXT_POINTER_MASK must be
+ * applied to a little-endian physical address
+ */
+struct ep_queue_head {
+	u32 max_pkt_length;	/* Mult(31-30) , Zlt(29) , Max Pkt len
+				   and IOS(15) */
+	u32 curr_dtd_ptr;	/* Current dTD Pointer(31-5) */
+	u32 next_dtd_ptr;	/* Next dTD Pointer(31-5), T(0) */
+	u32 size_ioc_int_sts;	/* Total bytes (30-16), IOC (15),
+				   MultO(11-10), STS (7-0)  */
+	u32 buff_ptr0;		/* Buffer pointer Page 0 (31-12) */
+	u32 buff_ptr1;		/* Buffer pointer Page 1 (31-12) */
+	u32 buff_ptr2;		/* Buffer pointer Page 2 (31-12) */
+	u32 buff_ptr3;		/* Buffer pointer Page 3 (31-12) */
+	u32 buff_ptr4;		/* Buffer pointer Page 4 (31-12) */
+	u32 res1;
+	u8 setup_buffer[8];	/* Setup data 8 bytes */
+	u32 res2[4];
+};
+
+/* Endpoint Queue Head Bit Masks */
+#define  EP_QUEUE_HEAD_MULT_POS               30
+#define  EP_QUEUE_HEAD_ZLT_SEL                0x20000000
+#define  EP_QUEUE_HEAD_MAX_PKT_LEN_POS        16
+#define  EP_QUEUE_HEAD_MAX_PKT_LEN(ep_info)   (((ep_info)>>16)&0x07ff)
+#define  EP_QUEUE_HEAD_IOS                    0x00008000
+#define  EP_QUEUE_HEAD_NEXT_TERMINATE         0x00000001
+#define  EP_QUEUE_HEAD_IOC                    0x00008000
+#define  EP_QUEUE_HEAD_MULTO                  0x00000C00
+#define  EP_QUEUE_HEAD_STATUS_HALT	      0x00000040
+#define  EP_QUEUE_HEAD_STATUS_ACTIVE          0x00000080
+#define  EP_QUEUE_CURRENT_OFFSET_MASK         0x00000FFF
+#define  EP_QUEUE_HEAD_NEXT_POINTER_MASK      0xFFFFFFE0
+#define  EP_QUEUE_FRINDEX_MASK                0x000007FF
+#define  EP_MAX_LENGTH_TRANSFER               0x4000
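+
+/* Example qh->max_pkt_length encoding for a 512-byte endpoint with the
+ * ZLT select bit set (illustrative only):
+ *
+ *	(512 << EP_QUEUE_HEAD_MAX_PKT_LEN_POS) | EP_QUEUE_HEAD_ZLT_SEL
+ */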
+
+/* Endpoint Transfer Descriptor data struct */
+/* Note: all td fields read by the hardware are little-endian */
+struct ep_td_struct {
+	u32 next_td_ptr;	/* Next TD pointer(31-5), T(0) set
+				   indicate invalid */
+	u32 size_ioc_sts;	/* Total bytes (30-16), IOC (15),
+				   MultO(11-10), STS (7-0)  */
+	u32 buff_ptr0;		/* Buffer pointer Page 0 */
+	u32 buff_ptr1;		/* Buffer pointer Page 1 */
+	u32 buff_ptr2;		/* Buffer pointer Page 2 */
+	u32 buff_ptr3;		/* Buffer pointer Page 3 */
+	u32 buff_ptr4;		/* Buffer pointer Page 4 */
+	u32 res;
+	/* 32 bytes */
+	dma_addr_t td_dma;	/* dma address for this td */
+	/* virtual address of next td specified in next_td_ptr */
+	struct ep_td_struct *next_td_virt;
+};
+
+/* Endpoint Transfer Descriptor bit Masks */
+#define  DTD_NEXT_TERMINATE                   0x00000001
+#define  DTD_IOC                              0x00008000
+#define  DTD_STATUS_ACTIVE                    0x00000080
+#define  DTD_STATUS_HALTED                    0x00000040
+#define  DTD_STATUS_DATA_BUFF_ERR             0x00000020
+#define  DTD_STATUS_TRANSACTION_ERR           0x00000008
+#define  DTD_RESERVED_FIELDS                  0x80007300
+#define  DTD_ADDR_MASK                        0xFFFFFFE0
+#define  DTD_PACKET_SIZE                      0x7FFF0000
+#define  DTD_LENGTH_BIT_POS                   16
+#define  DTD_ERROR_MASK                       (DTD_STATUS_HALTED | \
+                                               DTD_STATUS_DATA_BUFF_ERR | \
+                                               DTD_STATUS_TRANSACTION_ERR)
+/* Alignment requirements; must be a power of two */
+#define DTD_ALIGNMENT				0x20
+#define QH_ALIGNMENT				2048
+
+/* Controller dma boundary */
+#define UDC_DMA_BOUNDARY			0x1000
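+
+/* DTD_ALIGNMENT and UDC_DMA_BOUNDARY are passed to dma_pool_create() in
+ * fsl_udc_probe(), so each dTD comes out 32-byte aligned and never crosses
+ * a 4 KiB boundary.
+ */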
+
+/*-------------------------------------------------------------------------*/
+
+/* ### driver private data
+ */
+struct fsl_req {
+	struct usb_request req;
+	struct list_head queue;
+	/* ep_queue() adds request->queue to the tail of the
+	   owning fsl_ep's queue list */
+	struct fsl_ep *ep;
+	unsigned mapped:1;
+
+	struct ep_td_struct *head, *tail;	/* For dTD List
+						   cpu endian Virtual addr */
+	unsigned int dtd_count;
+};
+
+#define REQ_UNCOMPLETE			1
+
+struct fsl_ep {
+	struct usb_ep ep;
+	struct list_head queue;
+	struct fsl_udc *udc;
+	struct ep_queue_head *qh;
+	const struct usb_endpoint_descriptor *desc;
+	struct usb_gadget *gadget;
+
+	char name[14];
+	unsigned stopped:1;
+};
+
+#define EP_DIR_IN	1
+#define EP_DIR_OUT	0
+
+struct fsl_udc {
+	struct usb_gadget gadget;
+	struct usb_gadget_driver *driver;
+	struct fsl_usb2_platform_data *pdata;
+	struct completion *done;	/* to make sure release() is done */
+	struct fsl_ep *eps;
+	unsigned int max_ep;
+	unsigned int irq;
+
+	struct usb_ctrlrequest local_setup_buff;
+	spinlock_t lock;
+	struct usb_phy *transceiver;
+	unsigned softconnect:1;
+	unsigned vbus_active:1;
+	unsigned stopped:1;
+	unsigned remote_wakeup:1;
+	unsigned already_stopped:1;
+	unsigned big_endian_desc:1;
+
+	struct ep_queue_head *ep_qh;	/* Endpoints Queue-Head */
+	struct fsl_req *status_req;	/* ep0 status request */
+	struct dma_pool *td_pool;	/* dma pool for DTD */
+	enum fsl_usb2_phy_modes phy_mode;
+
+	size_t ep_qh_size;		/* size after alignment adjustment*/
+	dma_addr_t ep_qh_dma;		/* dma address of QH */
+
+	u32 max_pipes;          /* Device max pipes */
+	u32 bus_reset;		/* Device is bus resetting */
+	u32 resume_state;	/* USB state to resume */
+	u32 usb_state;		/* USB current state */
+	u32 ep0_state;		/* Endpoint zero state */
+	u32 ep0_dir;		/* Endpoint zero direction: can be
+				   USB_DIR_IN or USB_DIR_OUT */
+	u8 device_address;	/* Device USB address */
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef DEBUG
+#define DBG(fmt, args...) 	printk(KERN_DEBUG "[%s]  " fmt "\n", \
+				__func__, ## args)
+#else
+#define DBG(fmt, args...)	do {} while (0)
+#endif
+
+#if 0
+static void dump_msg(const char *label, const u8 * buf, unsigned int length)
+{
+	unsigned int start, num, i;
+	char line[52], *p;
+
+	if (length >= 512)
+		return;
+	DBG("%s, length %u:\n", label, length);
+	start = 0;
+	while (length > 0) {
+		num = min(length, 16u);
+		p = line;
+		for (i = 0; i < num; ++i) {
+			if (i == 8)
+				*p++ = ' ';
+			sprintf(p, " %02x", buf[i]);
+			p += 3;
+		}
+		*p = 0;
+		printk(KERN_DEBUG "%6x: %s\n", start, line);
+		buf += num;
+		start += num;
+		length -= num;
+	}
+}
+#endif
+
+#ifdef VERBOSE
+#define VDBG		DBG
+#else
+#define VDBG(stuff...)	do {} while (0)
+#endif
+
+#define ERR(stuff...)		pr_err("udc: " stuff)
+#define WARNING(stuff...)		pr_warning("udc: " stuff)
+#define INFO(stuff...)		pr_info("udc: " stuff)
+
+/*-------------------------------------------------------------------------*/
+
+/* ### Add board specific defines here
+ */
+
+/*
+ * ### pipe direction macros, from the device's point of view
+ */
+#define USB_RECV	0	/* OUT EP */
+#define USB_SEND	1	/* IN EP */
+
+/*
+ * ### internally used helper routines.
+ */
+#define ep_index(EP)		((EP)->desc->bEndpointAddress&0xF)
+#define ep_maxpacket(EP)	((EP)->ep.maxpacket)
+#define ep_is_in(EP)	((ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
+			USB_DIR_IN) : (((EP)->desc->bEndpointAddress \
+			& USB_DIR_IN) == USB_DIR_IN))
+#define get_ep_by_pipe(udc, pipe)	((pipe == 1)? &udc->eps[0]: \
+					&udc->eps[pipe])
+#define get_pipe_by_windex(windex)	((windex & USB_ENDPOINT_NUMBER_MASK) \
+					* 2 + ((windex & USB_DIR_IN) ? 1 : 0))
+#define get_pipe_by_ep(EP)	(ep_index(EP) * 2 + ep_is_in(EP))
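+
+/*
+ * Pipe numbering examples: pipe = ep number * 2 + direction, so a wIndex of
+ * 0x81 (ep1 IN) maps to pipe 3 and 0x02 (ep2 OUT) maps to pipe 4; pipes 0
+ * and 1 both resolve to the single eps[0] entry.
+ */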
+
+static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
+{
+	/* we only have one ep0 structure but two queue heads */
+	if (ep_index(ep) != 0)
+		return ep->qh;
+	else
+		return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
+				USB_DIR_IN) ? 1 : 0];
+}
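+
+/*
+ * ep0 example for get_qh_by_ep(): the hardware keeps separate queue heads
+ * for ep0 OUT (ep_qh[0]) and ep0 IN (ep_qh[1]) while the driver uses one
+ * fsl_ep for ep0, so the QH is picked from udc->ep0_dir at run time.
+ */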
+
+struct platform_device;
+#ifdef CONFIG_ARCH_MXC
+int fsl_udc_clk_init(struct platform_device *pdev);
+void fsl_udc_clk_finalize(struct platform_device *pdev);
+void fsl_udc_clk_release(void);
+#else
+static inline int fsl_udc_clk_init(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline void fsl_udc_clk_finalize(struct platform_device *pdev)
+{
+}
+static inline void fsl_udc_clk_release(void)
+{
+}
+#endif
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fusb300_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fusb300_udc.c
new file mode 100644
index 0000000..5831cb4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fusb300_udc.c
@@ -0,0 +1,1561 @@
+/*
+ * Fusb300 UDC (USB gadget)
+ *
+ * Copyright (C) 2010 Faraday Technology Corp.
+ *
+ * Author : Yuan-hsin Chen <yhchen@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "fusb300_udc.h"
+
+MODULE_DESCRIPTION("FUSB300  USB gadget driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yuan Hsin Chen <yhchen@faraday-tech.com>");
+MODULE_ALIAS("platform:fusb300_udc");
+
+#define DRIVER_VERSION	"20 October 2010"
+
+static const char udc_name[] = "fusb300_udc";
+static const char * const fusb300_ep_name[] = {
+	"ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7", "ep8", "ep9",
+	"ep10", "ep11", "ep12", "ep13", "ep14", "ep15"
+};
+
+static void done(struct fusb300_ep *ep, struct fusb300_request *req,
+		 int status);
+
+static void fusb300_enable_bit(struct fusb300 *fusb300, u32 offset,
+			       u32 value)
+{
+	u32 reg = ioread32(fusb300->reg + offset);
+
+	reg |= value;
+	iowrite32(reg, fusb300->reg + offset);
+}
+
+static void fusb300_disable_bit(struct fusb300 *fusb300, u32 offset,
+				u32 value)
+{
+	u32 reg = ioread32(fusb300->reg + offset);
+
+	reg &= ~value;
+	iowrite32(reg, fusb300->reg + offset);
+}
+
+
+static void fusb300_ep_setting(struct fusb300_ep *ep,
+			       struct fusb300_ep_info info)
+{
+	ep->epnum = info.epnum;
+	ep->type = info.type;
+}
+
+static int fusb300_ep_release(struct fusb300_ep *ep)
+{
+	if (!ep->epnum)
+		return 0;
+	ep->epnum = 0;
+	ep->stall = 0;
+	ep->wedged = 0;
+	return 0;
+}
+
+static void fusb300_set_fifo_entry(struct fusb300 *fusb300,
+				   u32 ep)
+{
+	u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+
+	val &= ~FUSB300_EPSET1_FIFOENTRY_MSK;
+	val |= FUSB300_EPSET1_FIFOENTRY(FUSB300_FIFO_ENTRY_NUM);
+	iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+}
+
+static void fusb300_set_start_entry(struct fusb300 *fusb300,
+				    u8 ep)
+{
+	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+	u32 start_entry = fusb300->fifo_entry_num * FUSB300_FIFO_ENTRY_NUM;
+
+	reg &= ~FUSB300_EPSET1_START_ENTRY_MSK;
+	reg |= FUSB300_EPSET1_START_ENTRY(start_entry);
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+	if (fusb300->fifo_entry_num == FUSB300_MAX_FIFO_ENTRY) {
+		fusb300->fifo_entry_num = 0;
+		fusb300->addrofs = 0;
+		pr_err("fifo entry is over the maximum number!\n");
+	} else
+		fusb300->fifo_entry_num++;
+}
+
+/* call fusb300_set_start_entry() before fusb300_set_epaddrofs() */
+static void fusb300_set_epaddrofs(struct fusb300 *fusb300,
+				  struct fusb300_ep_info info)
+{
+	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
+
+	reg &= ~FUSB300_EPSET2_ADDROFS_MSK;
+	reg |= FUSB300_EPSET2_ADDROFS(fusb300->addrofs);
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
+	fusb300->addrofs += (info.maxpacket + 7) / 8 * FUSB300_FIFO_ENTRY_NUM;
+}
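+
+/*
+ * addrofs appears to be expressed in 8-byte blocks (hence the rounding
+ * divide by 8 above): a 512-byte maxpacket endpoint consumes
+ * (512 + 7) / 8 = 64 blocks for each FIFO entry reserved for it.
+ */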
+
+static void ep_fifo_setting(struct fusb300 *fusb300,
+			    struct fusb300_ep_info info)
+{
+	fusb300_set_fifo_entry(fusb300, info.epnum);
+	fusb300_set_start_entry(fusb300, info.epnum);
+	fusb300_set_epaddrofs(fusb300, info);
+}
+
+static void fusb300_set_eptype(struct fusb300 *fusb300,
+			       struct fusb300_ep_info info)
+{
+	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+
+	reg &= ~FUSB300_EPSET1_TYPE_MSK;
+	reg |= FUSB300_EPSET1_TYPE(info.type);
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+}
+
+static void fusb300_set_epdir(struct fusb300 *fusb300,
+			      struct fusb300_ep_info info)
+{
+	u32 reg;
+
+	if (!info.dir_in)
+		return;
+	reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+	reg &= ~FUSB300_EPSET1_DIR_MSK;
+	reg |= FUSB300_EPSET1_DIRIN;
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+}
+
+static void fusb300_set_ep_active(struct fusb300 *fusb300,
+			  u8 ep)
+{
+	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+
+	reg |= FUSB300_EPSET1_ACTEN;
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+}
+
+static void fusb300_set_epmps(struct fusb300 *fusb300,
+			      struct fusb300_ep_info info)
+{
+	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
+
+	reg &= ~FUSB300_EPSET2_MPS_MSK;
+	reg |= FUSB300_EPSET2_MPS(info.maxpacket);
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
+}
+
+static void fusb300_set_interval(struct fusb300 *fusb300,
+				 struct fusb300_ep_info info)
+{
+	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+
+	reg &= ~FUSB300_EPSET1_INTERVAL(0x7);
+	reg |= FUSB300_EPSET1_INTERVAL(info.interval);
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+}
+
+static void fusb300_set_bwnum(struct fusb300 *fusb300,
+			      struct fusb300_ep_info info)
+{
+	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+
+	reg &= ~FUSB300_EPSET1_BWNUM(0x3);
+	reg |= FUSB300_EPSET1_BWNUM(info.bw_num);
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+}
+
+static void set_ep_reg(struct fusb300 *fusb300,
+		      struct fusb300_ep_info info)
+{
+	fusb300_set_eptype(fusb300, info);
+	fusb300_set_epdir(fusb300, info);
+	fusb300_set_epmps(fusb300, info);
+
+	if (info.interval)
+		fusb300_set_interval(fusb300, info);
+
+	if (info.bw_num)
+		fusb300_set_bwnum(fusb300, info);
+
+	fusb300_set_ep_active(fusb300, info.epnum);
+}
+
+static int config_ep(struct fusb300_ep *ep,
+		     const struct usb_endpoint_descriptor *desc)
+{
+	struct fusb300 *fusb300 = ep->fusb300;
+	struct fusb300_ep_info info;
+
+	ep->desc = desc;
+
+	info.interval = 0;
+	info.addrofs = 0;
+	info.bw_num = 0;
+
+	info.type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+	info.dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
+	info.maxpacket = usb_endpoint_maxp(desc);
+	info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+
+	if ((info.type == USB_ENDPOINT_XFER_INT) ||
+	   (info.type == USB_ENDPOINT_XFER_ISOC)) {
+		info.interval = desc->bInterval;
+		if (info.type == USB_ENDPOINT_XFER_ISOC)
+			info.bw_num = ((desc->wMaxPacketSize & 0x1800) >> 11);
+	}
+
+	ep_fifo_setting(fusb300, info);
+
+	set_ep_reg(fusb300, info);
+
+	fusb300_ep_setting(ep, info);
+
+	fusb300->ep[info.epnum] = ep;
+
+	return 0;
+}
+
+static int fusb300_enable(struct usb_ep *_ep,
+			  const struct usb_endpoint_descriptor *desc)
+{
+	struct fusb300_ep *ep;
+
+	ep = container_of(_ep, struct fusb300_ep, ep);
+
+	if (ep->fusb300->reenum) {
+		ep->fusb300->fifo_entry_num = 0;
+		ep->fusb300->addrofs = 0;
+		ep->fusb300->reenum = 0;
+	}
+
+	return config_ep(ep, desc);
+}
+
+static int fusb300_disable(struct usb_ep *_ep)
+{
+	struct fusb300_ep *ep;
+	struct fusb300_request *req;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct fusb300_ep, ep);
+
+	BUG_ON(!ep);
+
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct fusb300_request, queue);
+		spin_lock_irqsave(&ep->fusb300->lock, flags);
+		done(ep, req, -ECONNRESET);
+		spin_unlock_irqrestore(&ep->fusb300->lock, flags);
+	}
+
+	return fusb300_ep_release(ep);
+}
+
+static struct usb_request *fusb300_alloc_request(struct usb_ep *_ep,
+						gfp_t gfp_flags)
+{
+	struct fusb300_request *req;
+
+	req = kzalloc(sizeof(struct fusb300_request), gfp_flags);
+	if (!req)
+		return NULL;
+	INIT_LIST_HEAD(&req->queue);
+
+	return &req->req;
+}
+
+static void fusb300_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct fusb300_request *req;
+
+	req = container_of(_req, struct fusb300_request, req);
+	kfree(req);
+}
+
+static int enable_fifo_int(struct fusb300_ep *ep)
+{
+	struct fusb300 *fusb300 = ep->fusb300;
+
+	if (ep->epnum) {
+		fusb300_enable_bit(fusb300, FUSB300_OFFSET_IGER0,
+			FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum));
+	} else {
+		pr_err("can't enable_fifo_int ep0\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int disable_fifo_int(struct fusb300_ep *ep)
+{
+	struct fusb300 *fusb300 = ep->fusb300;
+
+	if (ep->epnum) {
+		fusb300_disable_bit(fusb300, FUSB300_OFFSET_IGER0,
+			FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum));
+	} else {
+		pr_err("can't disable_fifo_int ep0\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void fusb300_set_cxlen(struct fusb300 *fusb300, u32 length)
+{
+	u32 reg;
+
+	reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR);
+	reg &= ~FUSB300_CSR_LEN_MSK;
+	reg |= FUSB300_CSR_LEN(length);
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_CSR);
+}
+
+/* write data to cx fifo */
+static void fusb300_wrcxf(struct fusb300_ep *ep,
+		   struct fusb300_request *req)
+{
+	int i = 0;
+	u8 *tmp;
+	u32 data;
+	struct fusb300 *fusb300 = ep->fusb300;
+	u32 length = req->req.length - req->req.actual;
+
+	tmp = req->req.buf + req->req.actual;
+
+	if (length > SS_CTL_MAX_PACKET_SIZE) {
+		fusb300_set_cxlen(fusb300, SS_CTL_MAX_PACKET_SIZE);
+		for (i = (SS_CTL_MAX_PACKET_SIZE >> 2); i > 0; i--) {
+			data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 |
+				*(tmp + 3) << 24;
+			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+			tmp += 4;
+		}
+		req->req.actual += SS_CTL_MAX_PACKET_SIZE;
+	} else { /* length is at most the max packet size */
+		fusb300_set_cxlen(fusb300, length);
+		for (i = length >> 2; i > 0; i--) {
+			data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 |
+				*(tmp + 3) << 24;
+			printk(KERN_DEBUG "    0x%x\n", data);
+			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+			tmp = tmp + 4;
+		}
+		switch (length % 4) {
+		case 1:
+			data = *tmp;
+			printk(KERN_DEBUG "    0x%x\n", data);
+			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+			break;
+		case 2:
+			data = *tmp | *(tmp + 1) << 8;
+			printk(KERN_DEBUG "    0x%x\n", data);
+			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+			break;
+		case 3:
+			data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16;
+			printk(KERN_DEBUG "    0x%x\n", data);
+			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+			break;
+		default:
+			break;
+		}
+		req->req.actual += length;
+	}
+}
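+
+/*
+ * Byte-packing example for fusb300_wrcxf(): a buffer holding
+ * { 0x11, 0x22, 0x33, 0x44 } is written to CXPORT as the little-endian
+ * word 0x44332211; a 1-3 byte tail goes out as a final word holding just
+ * the remaining bytes (see the switch on length % 4).
+ */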
+
+static void fusb300_set_epnstall(struct fusb300 *fusb300, u8 ep)
+{
+	fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep),
+		FUSB300_EPSET0_STL);
+}
+
+static void fusb300_clear_epnstall(struct fusb300 *fusb300, u8 ep)
+{
+	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep));
+
+	if (reg & FUSB300_EPSET0_STL) {
+		printk(KERN_DEBUG "EP%d stall... Clear!!\n", ep);
+		reg &= ~FUSB300_EPSET0_STL;
+		iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET0(ep));
+	}
+}
+
+static void ep0_queue(struct fusb300_ep *ep, struct fusb300_request *req)
+{
+	if (ep->fusb300->ep0_dir) { /* if IN */
+		if (req->req.length) {
+			fusb300_wrcxf(ep, req);
+		} else
+			printk(KERN_DEBUG "%s : req->req.length = 0x%x\n",
+				__func__, req->req.length);
+		if ((req->req.length == req->req.actual) ||
+		    (req->req.actual < ep->ep.maxpacket))
+			done(ep, req, 0);
+	} else { /* OUT */
+		if (!req->req.length)
+			done(ep, req, 0);
+		else
+			fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER1,
+				FUSB300_IGER1_CX_OUT_INT);
+	}
+}
+
+static int fusb300_queue(struct usb_ep *_ep, struct usb_request *_req,
+			 gfp_t gfp_flags)
+{
+	struct fusb300_ep *ep;
+	struct fusb300_request *req;
+	unsigned long flags;
+	int request  = 0;
+
+	ep = container_of(_ep, struct fusb300_ep, ep);
+	req = container_of(_req, struct fusb300_request, req);
+
+	if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	spin_lock_irqsave(&ep->fusb300->lock, flags);
+
+	if (list_empty(&ep->queue))
+		request = 1;
+
+	list_add_tail(&req->queue, &ep->queue);
+
+	req->req.actual = 0;
+	req->req.status = -EINPROGRESS;
+
+	if (ep->desc == NULL) /* ep0 */
+		ep0_queue(ep, req);
+	else if (request && !ep->stall)
+		enable_fifo_int(ep);
+
+	spin_unlock_irqrestore(&ep->fusb300->lock, flags);
+
+	return 0;
+}
+
+static int fusb300_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct fusb300_ep *ep;
+	struct fusb300_request *req;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct fusb300_ep, ep);
+	req = container_of(_req, struct fusb300_request, req);
+
+	spin_lock_irqsave(&ep->fusb300->lock, flags);
+	if (!list_empty(&ep->queue))
+		done(ep, req, -ECONNRESET);
+	spin_unlock_irqrestore(&ep->fusb300->lock, flags);
+
+	return 0;
+}
+
+static int fusb300_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge)
+{
+	struct fusb300_ep *ep;
+	struct fusb300 *fusb300;
+	unsigned long flags;
+	int ret = 0;
+
+	ep = container_of(_ep, struct fusb300_ep, ep);
+
+	fusb300 = ep->fusb300;
+
+	spin_lock_irqsave(&ep->fusb300->lock, flags);
+
+	if (!list_empty(&ep->queue)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	if (value) {
+		fusb300_set_epnstall(fusb300, ep->epnum);
+		ep->stall = 1;
+		if (wedge)
+			ep->wedged = 1;
+	} else {
+		fusb300_clear_epnstall(fusb300, ep->epnum);
+		ep->stall = 0;
+		ep->wedged = 0;
+	}
+
+out:
+	spin_unlock_irqrestore(&ep->fusb300->lock, flags);
+	return ret;
+}
+
+static int fusb300_set_halt(struct usb_ep *_ep, int value)
+{
+	return fusb300_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int fusb300_set_wedge(struct usb_ep *_ep)
+{
+	return fusb300_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static void fusb300_fifo_flush(struct usb_ep *_ep)
+{
+}
+
+static struct usb_ep_ops fusb300_ep_ops = {
+	.enable		= fusb300_enable,
+	.disable	= fusb300_disable,
+
+	.alloc_request	= fusb300_alloc_request,
+	.free_request	= fusb300_free_request,
+
+	.queue		= fusb300_queue,
+	.dequeue	= fusb300_dequeue,
+
+	.set_halt	= fusb300_set_halt,
+	.fifo_flush	= fusb300_fifo_flush,
+	.set_wedge	= fusb300_set_wedge,
+};
+
+/*****************************************************************************/
+static void fusb300_clear_int(struct fusb300 *fusb300, u32 offset,
+		       u32 value)
+{
+	iowrite32(value, fusb300->reg + offset);
+}
+
+static void fusb300_reset(void)
+{
+}
+
+static void fusb300_set_cxstall(struct fusb300 *fusb300)
+{
+	fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR,
+			   FUSB300_CSR_STL);
+}
+
+static void fusb300_set_cxdone(struct fusb300 *fusb300)
+{
+	fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR,
+			   FUSB300_CSR_DONE);
+}
+
+/* read data from cx fifo */
+void fusb300_rdcxf(struct fusb300 *fusb300,
+		   u8 *buffer, u32 length)
+{
+	int i = 0;
+	u8 *tmp;
+	u32 data;
+
+	tmp = buffer;
+
+	for (i = (length >> 2); i > 0; i--) {
+		data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
+		printk(KERN_DEBUG "    0x%x\n", data);
+		*tmp = data & 0xFF;
+		*(tmp + 1) = (data >> 8) & 0xFF;
+		*(tmp + 2) = (data >> 16) & 0xFF;
+		*(tmp + 3) = (data >> 24) & 0xFF;
+		tmp = tmp + 4;
+	}
+
+	switch (length % 4) {
+	case 1:
+		data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
+		printk(KERN_DEBUG "    0x%x\n", data);
+		*tmp = data & 0xFF;
+		break;
+	case 2:
+		data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
+		printk(KERN_DEBUG "    0x%x\n", data);
+		*tmp = data & 0xFF;
+		*(tmp + 1) = (data >> 8) & 0xFF;
+		break;
+	case 3:
+		data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
+		printk(KERN_DEBUG "    0x%x\n", data);
+		*tmp = data & 0xFF;
+		*(tmp + 1) = (data >> 8) & 0xFF;
+		*(tmp + 2) = (data >> 16) & 0xFF;
+		break;
+	default:
+		break;
+	}
+}
+
+static void fusb300_rdfifo(struct fusb300_ep *ep,
+			  struct fusb300_request *req,
+			  u32 length)
+{
+	int i = 0;
+	u8 *tmp;
+	u32 data, reg;
+	struct fusb300 *fusb300 = ep->fusb300;
+
+	tmp = req->req.buf + req->req.actual;
+	req->req.actual += length;
+
+	if (req->req.actual > req->req.length)
+		printk(KERN_DEBUG "req->req.actual > req->req.length\n");
+
+	for (i = (length >> 2); i > 0; i--) {
+		data = ioread32(fusb300->reg +
+			FUSB300_OFFSET_EPPORT(ep->epnum));
+		*tmp = data & 0xFF;
+		*(tmp + 1) = (data >> 8) & 0xFF;
+		*(tmp + 2) = (data >> 16) & 0xFF;
+		*(tmp + 3) = (data >> 24) & 0xFF;
+		tmp = tmp + 4;
+	}
+
+	switch (length % 4) {
+	case 1:
+		data = ioread32(fusb300->reg +
+			FUSB300_OFFSET_EPPORT(ep->epnum));
+		*tmp = data & 0xFF;
+		break;
+	case 2:
+		data = ioread32(fusb300->reg +
+			FUSB300_OFFSET_EPPORT(ep->epnum));
+		*tmp = data & 0xFF;
+		*(tmp + 1) = (data >> 8) & 0xFF;
+		break;
+	case 3:
+		data = ioread32(fusb300->reg +
+			FUSB300_OFFSET_EPPORT(ep->epnum));
+		*tmp = data & 0xFF;
+		*(tmp + 1) = (data >> 8) & 0xFF;
+		*(tmp + 2) = (data >> 16) & 0xFF;
+		break;
+	default:
+		break;
+	}
+
+	do {
+		reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
+		reg &= FUSB300_IGR1_SYNF0_EMPTY_INT;
+		if (i)
+			printk(KERN_INFO "sync fifo is not empty!\n");
+		i++;
+	} while (!reg);
+}
+
+static u8 fusb300_get_epnstall(struct fusb300 *fusb300, u8 ep)
+{
+	u8 value;
+	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep));
+
+	value = reg & FUSB300_EPSET0_STL;
+
+	return value;
+}
+
+static u8 fusb300_get_cxstall(struct fusb300 *fusb300)
+{
+	u8 value;
+	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR);
+
+	value = (reg & FUSB300_CSR_STL) >> 1;
+
+	return value;
+}
+
+static void request_error(struct fusb300 *fusb300)
+{
+	fusb300_set_cxstall(fusb300);
+	printk(KERN_DEBUG "request error!!\n");
+}
+
+static void get_status(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+__releases(fusb300->lock)
+__acquires(fusb300->lock)
+{
+	u8 ep;
+	u16 status = 0;
+	u16 w_index = ctrl->wIndex;
+
+	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		status = 1 << USB_DEVICE_SELF_POWERED;
+		break;
+	case USB_RECIP_INTERFACE:
+		status = 0;
+		break;
+	case USB_RECIP_ENDPOINT:
+		ep = w_index & USB_ENDPOINT_NUMBER_MASK;
+		if (ep) {
+			if (fusb300_get_epnstall(fusb300, ep))
+				status = 1 << USB_ENDPOINT_HALT;
+		} else {
+			if (fusb300_get_cxstall(fusb300))
+				status = 0;
+		}
+		break;
+
+	default:
+		request_error(fusb300);
+		return;		/* exit */
+	}
+
+	fusb300->ep0_data = cpu_to_le16(status);
+	fusb300->ep0_req->buf = &fusb300->ep0_data;
+	fusb300->ep0_req->length = 2;
+
+	spin_unlock(&fusb300->lock);
+	fusb300_queue(fusb300->gadget.ep0, fusb300->ep0_req, GFP_KERNEL);
+	spin_lock(&fusb300->lock);
+}
+
+static void set_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+{
+	u8 ep;
+
+	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		fusb300_set_cxdone(fusb300);
+		break;
+	case USB_RECIP_INTERFACE:
+		fusb300_set_cxdone(fusb300);
+		break;
+	case USB_RECIP_ENDPOINT: {
+		u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+		ep = w_index & USB_ENDPOINT_NUMBER_MASK;
+		if (ep)
+			fusb300_set_epnstall(fusb300, ep);
+		else
+			fusb300_set_cxstall(fusb300);
+		fusb300_set_cxdone(fusb300);
+		}
+		break;
+	default:
+		request_error(fusb300);
+		break;
+	}
+}
+
+static void fusb300_clear_seqnum(struct fusb300 *fusb300, u8 ep)
+{
+	fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep),
+			    FUSB300_EPSET0_CLRSEQNUM);
+}
+
+static void clear_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+{
+	struct fusb300_ep *ep =
+		fusb300->ep[ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK];
+
+	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		fusb300_set_cxdone(fusb300);
+		break;
+	case USB_RECIP_INTERFACE:
+		fusb300_set_cxdone(fusb300);
+		break;
+	case USB_RECIP_ENDPOINT:
+		if (ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK) {
+			if (ep->wedged) {
+				fusb300_set_cxdone(fusb300);
+				break;
+			}
+			if (ep->stall) {
+				ep->stall = 0;
+				fusb300_clear_seqnum(fusb300, ep->epnum);
+				fusb300_clear_epnstall(fusb300, ep->epnum);
+				if (!list_empty(&ep->queue))
+					enable_fifo_int(ep);
+			}
+		}
+		fusb300_set_cxdone(fusb300);
+		break;
+	default:
+		request_error(fusb300);
+		break;
+	}
+}
+
+static void fusb300_set_dev_addr(struct fusb300 *fusb300, u16 addr)
+{
+	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_DAR);
+
+	reg &= ~FUSB300_DAR_DRVADDR_MSK;
+	reg |= FUSB300_DAR_DRVADDR(addr);
+
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_DAR);
+}
+
+static void set_address(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+{
+	if (ctrl->wValue >= 0x0100)
+		request_error(fusb300);
+	else {
+		fusb300_set_dev_addr(fusb300, ctrl->wValue);
+		fusb300_set_cxdone(fusb300);
+	}
+}
+
+#define UVC_COPY_DESCRIPTORS(mem, src) \
+	do { \
+		const struct usb_descriptor_header * const *__src; \
+		for (__src = src; *__src; ++__src) { \
+			memcpy(mem, *__src, (*__src)->bLength); \
+			mem += (*__src)->bLength; \
+		} \
+	} while (0)
+
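+/*
+ * setup_packet() reads the 8-byte SETUP data from the CX FIFO and handles
+ * the standard requests it can complete by itself; a non-zero return value
+ * is used by the caller to hand the request on to the gadget driver's
+ * setup() callback.
+ */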
+static int setup_packet(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+{
+	u8 *p = (u8 *)ctrl;
+	u8 ret = 0;
+	u8 i = 0;
+
+	fusb300_rdcxf(fusb300, p, 8);
+	fusb300->ep0_dir = ctrl->bRequestType & USB_DIR_IN;
+	fusb300->ep0_length = ctrl->wLength;
+
+	/* check request */
+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+		switch (ctrl->bRequest) {
+		case USB_REQ_GET_STATUS:
+			get_status(fusb300, ctrl);
+			break;
+		case USB_REQ_CLEAR_FEATURE:
+			clear_feature(fusb300, ctrl);
+			break;
+		case USB_REQ_SET_FEATURE:
+			set_feature(fusb300, ctrl);
+			break;
+		case USB_REQ_SET_ADDRESS:
+			set_address(fusb300, ctrl);
+			break;
+		case USB_REQ_SET_CONFIGURATION:
+			fusb300_enable_bit(fusb300, FUSB300_OFFSET_DAR,
+					   FUSB300_DAR_SETCONFG);
+			/* clear sequence number */
+			for (i = 1; i <= FUSB300_MAX_NUM_EP; i++)
+				fusb300_clear_seqnum(fusb300, i);
+			fusb300->reenum = 1;
+			ret = 1;
+			break;
+		default:
+			ret = 1;
+			break;
+		}
+	} else
+		ret = 1;
+
+	return ret;
+}
+
+static void done(struct fusb300_ep *ep, struct fusb300_request *req,
+		 int status)
+{
+	list_del_init(&req->queue);
+
+	/* don't modify queue heads during completion callback */
+	if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN)
+		req->req.status = -ESHUTDOWN;
+	else
+		req->req.status = status;
+
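+	/*
+	 * Drop the lock across the completion callback: the gadget driver's
+	 * ->complete() may queue another request on this endpoint.
+	 */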
+	spin_unlock(&ep->fusb300->lock);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&ep->fusb300->lock);
+
+	if (ep->epnum) {
+		disable_fifo_int(ep);
+		if (!list_empty(&ep->queue))
+			enable_fifo_int(ep);
+	} else
+		fusb300_set_cxdone(ep->fusb300);
+}
+
+static void fusb300_fill_idma_prdtbl(struct fusb300_ep *ep, dma_addr_t d,
+		u32 len)
+{
+	u32 value;
+	u32 reg;
+
+	/* wait until software owns the PRD again (H bit cleared) */
+	do {
+		reg = ioread32(ep->fusb300->reg +
+			FUSB300_OFFSET_EPPRD_W0(ep->epnum));
+		reg &= FUSB300_EPPRD0_H;
+	} while (reg);
+
+	iowrite32(d, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W1(ep->epnum));
+
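+	/*
+	 * Word 0: byte count in BTC[23:0], plus the H (hand the PRD to the
+	 * hardware), F, L (single-entry table) and I (raise EPn_PRD_INT on
+	 * completion) control bits; see FUSB300_EPPRD0_* in fusb300_udc.h.
+	 */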
+	value = FUSB300_EPPRD0_BTC(len) | FUSB300_EPPRD0_H |
+		FUSB300_EPPRD0_F | FUSB300_EPPRD0_L | FUSB300_EPPRD0_I;
+	iowrite32(value, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W0(ep->epnum));
+
+	iowrite32(0x0, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W2(ep->epnum));
+
+	fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_EPPRDRDY,
+		FUSB300_EPPRDR_EP_PRD_RDY(ep->epnum));
+}
+
+static void fusb300_wait_idma_finished(struct fusb300_ep *ep)
+{
+	u32 reg;
+
+	do {
+		reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR1);
+		if ((reg & FUSB300_IGR1_VBUS_CHG_INT) ||
+		    (reg & FUSB300_IGR1_WARM_RST_INT) ||
+		    (reg & FUSB300_IGR1_HOT_RST_INT) ||
+		    (reg & FUSB300_IGR1_USBRST_INT)
+		)
+			goto IDMA_RESET;
+		reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR0);
+		reg &= FUSB300_IGR0_EPn_PRD_INT(ep->epnum);
+	} while (!reg);
+
+	fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGR0,
+		FUSB300_IGR0_EPn_PRD_INT(ep->epnum));
+IDMA_RESET:
+	fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGER0,
+		FUSB300_IGER0_EEPn_PRD_INT(ep->epnum));
+}
+
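+/*
+ * IDMA is only used for IN endpoints (see in_ep_fifo_handler()), hence the
+ * DMA_TO_DEVICE mapping below; OUT data is read from the FIFO in
+ * out_ep_fifo_handler() via fusb300_rdfifo().
+ */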
+static void fusb300_set_idma(struct fusb300_ep *ep,
+			struct fusb300_request *req)
+{
+	dma_addr_t d;
+
+	d = dma_map_single(NULL, req->req.buf, req->req.length, DMA_TO_DEVICE);
+
+	if (dma_mapping_error(NULL, d)) {
+		printk(KERN_DEBUG "dma_mapping_error\n");
+		return;
+	}
+
+	dma_sync_single_for_device(NULL, d, req->req.length, DMA_TO_DEVICE);
+
+	fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER0,
+		FUSB300_IGER0_EEPn_PRD_INT(ep->epnum));
+
+	fusb300_fill_idma_prdtbl(ep, d, req->req.length);
+	/* wait until the IDMA transfer has finished */
+	fusb300_wait_idma_finished(ep);
+
+	dma_unmap_single(NULL, d, req->req.length, DMA_TO_DEVICE);
+}
+
+static void in_ep_fifo_handler(struct fusb300_ep *ep)
+{
+	struct fusb300_request *req = list_entry(ep->queue.next,
+					struct fusb300_request, queue);
+
+	if (req->req.length)
+		fusb300_set_idma(ep, req);
+	done(ep, req, 0);
+}
+
+static void out_ep_fifo_handler(struct fusb300_ep *ep)
+{
+	struct fusb300 *fusb300 = ep->fusb300;
+	struct fusb300_request *req = list_entry(ep->queue.next,
+						 struct fusb300_request, queue);
+	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum));
+	u32 length = reg & FUSB300_FFR_BYCNT;
+
+	fusb300_rdfifo(ep, req, length);
+
+	/* complete the OUT transfer on full length or short packet */
+	if ((req->req.length == req->req.actual) || (length < ep->ep.maxpacket))
+		done(ep, req, 0);
+}
+
+static void check_device_mode(struct fusb300 *fusb300)
+{
+	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_GCR);
+
+	switch (reg & FUSB300_GCR_DEVEN_MSK) {
+	case FUSB300_GCR_DEVEN_SS:
+		fusb300->gadget.speed = USB_SPEED_SUPER;
+		break;
+	case FUSB300_GCR_DEVEN_HS:
+		fusb300->gadget.speed = USB_SPEED_HIGH;
+		break;
+	case FUSB300_GCR_DEVEN_FS:
+		fusb300->gadget.speed = USB_SPEED_FULL;
+		break;
+	default:
+		fusb300->gadget.speed = USB_SPEED_UNKNOWN;
+		break;
+	}
+	printk(KERN_INFO "dev_mode = %d\n", (reg & FUSB300_GCR_DEVEN_MSK));
+}
+
+
+static void fusb300_ep0out(struct fusb300 *fusb300)
+{
+	struct fusb300_ep *ep = fusb300->ep[0];
+	u32 reg;
+
+	if (!list_empty(&ep->queue)) {
+		struct fusb300_request *req;
+
+		req = list_first_entry(&ep->queue,
+			struct fusb300_request, queue);
+		if (req->req.length)
+			fusb300_rdcxf(ep->fusb300, req->req.buf,
+				req->req.length);
+		done(ep, req, 0);
+		reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1);
+		reg &= ~FUSB300_IGER1_CX_OUT_INT;
+		iowrite32(reg, fusb300->reg + FUSB300_OFFSET_IGER1);
+	} else
+		pr_err("%s : empty queue\n", __func__);
+}
+
+static void fusb300_ep0in(struct fusb300 *fusb300)
+{
+	struct fusb300_request *req;
+	struct fusb300_ep *ep = fusb300->ep[0];
+
+	if ((!list_empty(&ep->queue)) && (fusb300->ep0_dir)) {
+		req = list_entry(ep->queue.next,
+				struct fusb300_request, queue);
+		if (req->req.length)
+			fusb300_wrcxf(ep, req);
+		if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
+			done(ep, req, 0);
+	} else
+		fusb300_set_cxdone(fusb300);
+}
+
+static void fusb300_grp2_handler(void)
+{
+}
+
+static void fusb300_grp3_handler(void)
+{
+}
+
+static void fusb300_grp4_handler(void)
+{
+}
+
+static void fusb300_grp5_handler(void)
+{
+}
+
+static irqreturn_t fusb300_irq(int irq, void *_fusb300)
+{
+	struct fusb300 *fusb300 = _fusb300;
+	u32 int_grp1 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
+	u32 int_grp1_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1);
+	u32 int_grp0 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR0);
+	u32 int_grp0_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER0);
+	struct usb_ctrlrequest ctrl;
+	u8 in;
+	u32 reg;
+	int i;
+
+	spin_lock(&fusb300->lock);
+
+	int_grp1 &= int_grp1_en;
+	int_grp0 &= int_grp0_en;
+
+	if (int_grp1 & FUSB300_IGR1_WARM_RST_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_WARM_RST_INT);
+		printk(KERN_INFO "fusb300_warmreset\n");
+		fusb300_reset();
+	}
+
+	if (int_grp1 & FUSB300_IGR1_HOT_RST_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_HOT_RST_INT);
+		printk(KERN_INFO "fusb300_hotreset\n");
+		fusb300_reset();
+	}
+
+	if (int_grp1 & FUSB300_IGR1_USBRST_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_USBRST_INT);
+		fusb300_reset();
+	}
+	/* CX_COMABT_INT has the highest priority */
+
+	if (int_grp1 & FUSB300_IGR1_CX_COMABT_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_CX_COMABT_INT);
+		printk(KERN_INFO "fusb300_ep0abt\n");
+	}
+
+	if (int_grp1 & FUSB300_IGR1_VBUS_CHG_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_VBUS_CHG_INT);
+		printk(KERN_INFO "fusb300_vbus_change\n");
+	}
+
+	if (int_grp1 & FUSB300_IGR1_U3_EXIT_FAIL_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_U3_EXIT_FAIL_INT);
+	}
+
+	if (int_grp1 & FUSB300_IGR1_U2_EXIT_FAIL_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_U2_EXIT_FAIL_INT);
+	}
+
+	if (int_grp1 & FUSB300_IGR1_U1_EXIT_FAIL_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_U1_EXIT_FAIL_INT);
+	}
+
+	if (int_grp1 & FUSB300_IGR1_U2_ENTRY_FAIL_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_U2_ENTRY_FAIL_INT);
+	}
+
+	if (int_grp1 & FUSB300_IGR1_U1_ENTRY_FAIL_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_U1_ENTRY_FAIL_INT);
+	}
+
+	if (int_grp1 & FUSB300_IGR1_U3_EXIT_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_U3_EXIT_INT);
+		printk(KERN_INFO "FUSB300_IGR1_U3_EXIT_INT\n");
+	}
+
+	if (int_grp1 & FUSB300_IGR1_U2_EXIT_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_U2_EXIT_INT);
+		printk(KERN_INFO "FUSB300_IGR1_U2_EXIT_INT\n");
+	}
+
+	if (int_grp1 & FUSB300_IGR1_U1_EXIT_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_U1_EXIT_INT);
+		printk(KERN_INFO "FUSB300_IGR1_U1_EXIT_INT\n");
+	}
+
+	if (int_grp1 & FUSB300_IGR1_U3_ENTRY_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_U3_ENTRY_INT);
+		printk(KERN_INFO "FUSB300_IGR1_U3_ENTRY_INT\n");
+		fusb300_enable_bit(fusb300, FUSB300_OFFSET_SSCR1,
+				   FUSB300_SSCR1_GO_U3_DONE);
+	}
+
+	if (int_grp1 & FUSB300_IGR1_U2_ENTRY_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_U2_ENTRY_INT);
+		printk(KERN_INFO "FUSB300_IGR1_U2_ENTRY_INT\n");
+	}
+
+	if (int_grp1 & FUSB300_IGR1_U1_ENTRY_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_U1_ENTRY_INT);
+		printk(KERN_INFO "FUSB300_IGR1_U1_ENTRY_INT\n");
+	}
+
+	if (int_grp1 & FUSB300_IGR1_RESM_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_RESM_INT);
+		printk(KERN_INFO "fusb300_resume\n");
+	}
+
+	if (int_grp1 & FUSB300_IGR1_SUSP_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_SUSP_INT);
+		printk(KERN_INFO "fusb300_suspend\n");
+	}
+
+	if (int_grp1 & FUSB300_IGR1_HS_LPM_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_HS_LPM_INT);
+		printk(KERN_INFO "fusb300_HS_LPM_INT\n");
+	}
+
+	if (int_grp1 & FUSB300_IGR1_DEV_MODE_CHG_INT) {
+		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+				  FUSB300_IGR1_DEV_MODE_CHG_INT);
+		check_device_mode(fusb300);
+	}
+
+	if (int_grp1 & FUSB300_IGR1_CX_COMFAIL_INT) {
+		fusb300_set_cxstall(fusb300);
+		printk(KERN_INFO "fusb300_ep0fail\n");
+	}
+
+	if (int_grp1 & FUSB300_IGR1_CX_SETUP_INT) {
+		printk(KERN_INFO "fusb300_ep0setup\n");
+		if (setup_packet(fusb300, &ctrl)) {
+			spin_unlock(&fusb300->lock);
+			if (fusb300->driver->setup(&fusb300->gadget, &ctrl) < 0)
+				fusb300_set_cxstall(fusb300);
+			spin_lock(&fusb300->lock);
+		}
+	}
+
+	if (int_grp1 & FUSB300_IGR1_CX_CMDEND_INT)
+		printk(KERN_INFO "fusb300_cmdend\n");
+
+
+	if (int_grp1 & FUSB300_IGR1_CX_OUT_INT) {
+		printk(KERN_INFO "fusb300_cxout\n");
+		fusb300_ep0out(fusb300);
+	}
+
+	if (int_grp1 & FUSB300_IGR1_CX_IN_INT) {
+		printk(KERN_INFO "fusb300_cxin\n");
+		fusb300_ep0in(fusb300);
+	}
+
+	if (int_grp1 & FUSB300_IGR1_INTGRP5)
+		fusb300_grp5_handler();
+
+	if (int_grp1 & FUSB300_IGR1_INTGRP4)
+		fusb300_grp4_handler();
+
+	if (int_grp1 & FUSB300_IGR1_INTGRP3)
+		fusb300_grp3_handler();
+
+	if (int_grp1 & FUSB300_IGR1_INTGRP2)
+		fusb300_grp2_handler();
+
+	if (int_grp0) {
+		for (i = 1; i < FUSB300_MAX_NUM_EP; i++) {
+			if (int_grp0 & FUSB300_IGR0_EPn_FIFO_INT(i)) {
+				reg = ioread32(fusb300->reg +
+					FUSB300_OFFSET_EPSET1(i));
+				in = (reg & FUSB300_EPSET1_DIRIN) ? 1 : 0;
+				if (in)
+					in_ep_fifo_handler(fusb300->ep[i]);
+				else
+					out_ep_fifo_handler(fusb300->ep[i]);
+			}
+		}
+	}
+
+	spin_unlock(&fusb300->lock);
+
+	return IRQ_HANDLED;
+}
+
+static void fusb300_set_u2_timeout(struct fusb300 *fusb300,
+				   u32 time)
+{
+	u32 reg;
+
+	reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT);
+	reg &= ~0xff;
+	reg |= FUSB300_SSCR2_U2TIMEOUT(time);
+
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT);
+}
+
+static void fusb300_set_u1_timeout(struct fusb300 *fusb300,
+				   u32 time)
+{
+	u32 reg;
+
+	reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT);
+	reg &= ~(0xff << 8);
+	reg |= FUSB300_SSCR2_U1TIMEOUT(time);
+
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT);
+}
+
+static void init_controller(struct fusb300 *fusb300)
+{
+	u32 reg;
+	u32 mask = 0;
+	u32 val = 0;
+
+	/* enable AHB split transactions on slave ports 0 and 1 */
+	mask = val = FUSB300_AHBBCR_S0_SPLIT_ON | FUSB300_AHBBCR_S1_SPLIT_ON;
+	reg = ioread32(fusb300->reg + FUSB300_OFFSET_AHBCR);
+	reg &= ~mask;
+	reg |= val;
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_AHBCR);
+
+	/* enable high-speed LPM */
+	mask = val = FUSB300_HSCR_HS_LPM_PERMIT;
+	reg = ioread32(fusb300->reg + FUSB300_OFFSET_HSCR);
+	reg &= ~mask;
+	reg |= val;
+	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_HSCR);
+
+	/* set the U1/U2 timeout values */
+	fusb300_set_u2_timeout(fusb300, 0xff);
+	fusb300_set_u1_timeout(fusb300, 0xff);
+
+	/*
+	 * enable group-1 interrupts: 0xcfffff9f is every IGER1 bit except
+	 * SYNF1/SYNF0_EMPTY_INT (bits 29:28) and CX_CMDEND/CX_OUT_INT
+	 * (bits 6:5)
+	 */
+	iowrite32(0xcfffff9f, fusb300->reg + FUSB300_OFFSET_IGER1);
+}
+/*------------------------------------------------------------------------*/
+static struct fusb300 *the_controller;
+
+static int fusb300_udc_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct fusb300 *fusb300 = the_controller;
+	int retval;
+
+	if (!driver
+			|| driver->max_speed < USB_SPEED_FULL
+			|| !bind
+			|| !driver->setup)
+		return -EINVAL;
+
+	if (!fusb300)
+		return -ENODEV;
+
+	if (fusb300->driver)
+		return -EBUSY;
+
+	/* hook up the driver */
+	driver->driver.bus = NULL;
+	fusb300->driver = driver;
+	fusb300->gadget.dev.driver = &driver->driver;
+
+	retval = device_add(&fusb300->gadget.dev);
+	if (retval) {
+		pr_err("device_add error (%d)\n", retval);
+		goto error;
+	}
+
+	retval = bind(&fusb300->gadget);
+	if (retval) {
+		pr_err("bind to driver error (%d)\n", retval);
+		device_del(&fusb300->gadget.dev);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	fusb300->driver = NULL;
+	fusb300->gadget.dev.driver = NULL;
+
+	return retval;
+}
+
+static int fusb300_udc_stop(struct usb_gadget_driver *driver)
+{
+	struct fusb300 *fusb300 = the_controller;
+
+	if (driver != fusb300->driver || !driver->unbind)
+		return -EINVAL;
+
+	driver->unbind(&fusb300->gadget);
+	fusb300->gadget.dev.driver = NULL;
+
+	init_controller(fusb300);
+	device_del(&fusb300->gadget.dev);
+	fusb300->driver = NULL;
+
+	return 0;
+}
+/*--------------------------------------------------------------------------*/
+
+static int fusb300_udc_pullup(struct usb_gadget *_gadget, int is_active)
+{
+	return 0;
+}
+
+static struct usb_gadget_ops fusb300_gadget_ops = {
+	.pullup		= fusb300_udc_pullup,
+	.start		= fusb300_udc_start,
+	.stop		= fusb300_udc_stop,
+};
+
+static int __exit fusb300_remove(struct platform_device *pdev)
+{
+	struct fusb300 *fusb300 = dev_get_drvdata(&pdev->dev);
+
+	usb_del_gadget_udc(&fusb300->gadget);
+	iounmap(fusb300->reg);
+	free_irq(platform_get_irq(pdev, 0), fusb300);
+
+	fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
+	kfree(fusb300);
+
+	return 0;
+}
+
+static int __init fusb300_probe(struct platform_device *pdev)
+{
+	struct resource *res, *ires, *ires1;
+	void __iomem *reg = NULL;
+	struct fusb300 *fusb300 = NULL;
+	struct fusb300_ep *_ep[FUSB300_MAX_NUM_EP];
+	int ret = 0;
+	int i;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENODEV;
+		pr_err("platform_get_resource error.\n");
+		goto clean_up;
+	}
+
+	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!ires) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev,
+			"platform_get_resource IORESOURCE_IRQ error.\n");
+		goto clean_up;
+	}
+
+	ires1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+	if (!ires1) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev,
+			"platform_get_resource IORESOURCE_IRQ 1 error.\n");
+		goto clean_up;
+	}
+
+	reg = ioremap(res->start, resource_size(res));
+	if (reg == NULL) {
+		ret = -ENOMEM;
+		pr_err("ioremap error.\n");
+		goto clean_up;
+	}
+
+	/* initialize udc */
+	fusb300 = kzalloc(sizeof(struct fusb300), GFP_KERNEL);
+	if (fusb300 == NULL) {
+		ret = -ENOMEM;
+		pr_err("kzalloc error\n");
+		goto clean_up;
+	}
+
+	for (i = 0; i < FUSB300_MAX_NUM_EP; i++) {
+		_ep[i] = kzalloc(sizeof(struct fusb300_ep), GFP_KERNEL);
+		if (_ep[i] == NULL) {
+			ret = -ENOMEM;
+			pr_err("_ep kzalloc error\n");
+			goto clean_up;
+		}
+		fusb300->ep[i] = _ep[i];
+	}
+
+	spin_lock_init(&fusb300->lock);
+
+	dev_set_drvdata(&pdev->dev, fusb300);
+
+	fusb300->gadget.ops = &fusb300_gadget_ops;
+
+	device_initialize(&fusb300->gadget.dev);
+
+	dev_set_name(&fusb300->gadget.dev, "gadget");
+
+	fusb300->gadget.max_speed = USB_SPEED_HIGH;
+	fusb300->gadget.dev.parent = &pdev->dev;
+	fusb300->gadget.dev.dma_mask = pdev->dev.dma_mask;
+	fusb300->gadget.dev.release = pdev->dev.release;
+	fusb300->gadget.name = udc_name;
+	fusb300->reg = reg;
+
+	ret = request_irq(ires->start, fusb300_irq, IRQF_SHARED,
+			  udc_name, fusb300);
+	if (ret < 0) {
+		pr_err("request_irq error (%d)\n", ret);
+		goto clean_up;
+	}
+
+	ret = request_irq(ires1->start, fusb300_irq,
+			IRQF_SHARED, udc_name, fusb300);
+	if (ret < 0) {
+		pr_err("request_irq1 error (%d)\n", ret);
+		goto clean_up;
+	}
+
+	INIT_LIST_HEAD(&fusb300->gadget.ep_list);
+
+	for (i = 0; i < FUSB300_MAX_NUM_EP ; i++) {
+		struct fusb300_ep *ep = fusb300->ep[i];
+
+		if (i != 0) {
+			INIT_LIST_HEAD(&fusb300->ep[i]->ep.ep_list);
+			list_add_tail(&fusb300->ep[i]->ep.ep_list,
+				     &fusb300->gadget.ep_list);
+		}
+		ep->fusb300 = fusb300;
+		INIT_LIST_HEAD(&ep->queue);
+		ep->ep.name = fusb300_ep_name[i];
+		ep->ep.ops = &fusb300_ep_ops;
+		ep->ep.maxpacket = HS_BULK_MAX_PACKET_SIZE;
+	}
+	fusb300->ep[0]->ep.maxpacket = HS_CTL_MAX_PACKET_SIZE;
+	fusb300->ep[0]->epnum = 0;
+	fusb300->gadget.ep0 = &fusb300->ep[0]->ep;
+	INIT_LIST_HEAD(&fusb300->gadget.ep0->ep_list);
+
+	the_controller = fusb300;
+
+	fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep,
+				GFP_KERNEL);
+	if (fusb300->ep0_req == NULL) {
+		ret = -ENOMEM;
+		goto clean_up3;
+	}
+
+	init_controller(fusb300);
+	ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget);
+	if (ret)
+		goto err_add_udc;
+
+	dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
+
+	return 0;
+err_add_udc:
+	fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
+
+clean_up3:
+	free_irq(ires->start, fusb300);
+
+clean_up:
+	if (fusb300) {
+		if (fusb300->ep0_req)
+			fusb300_free_request(&fusb300->ep[0]->ep,
+				fusb300->ep0_req);
+		kfree(fusb300);
+	}
+	if (reg)
+		iounmap(reg);
+
+	return ret;
+}
+
+static struct platform_driver fusb300_driver = {
+	.remove =	__exit_p(fusb300_remove),
+	.driver		= {
+		.name =	(char *) udc_name,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init fusb300_udc_init(void)
+{
+	return platform_driver_probe(&fusb300_driver, fusb300_probe);
+}
+
+module_init(fusb300_udc_init);
+
+static void __exit fusb300_udc_cleanup(void)
+{
+	platform_driver_unregister(&fusb300_driver);
+}
+module_exit(fusb300_udc_cleanup);
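
For context, a minimal sketch of the consumer side of this UDC, assuming the
3.4-era udc-core API: usb_gadget_probe_driver() ends up in the ->start op
registered in fusb300_gadget_ops above, i.e. fusb300_udc_start().  All
example_* names below are hypothetical stubs, kept just complete enough to
pass the checks in fusb300_udc_start() and fusb300_udc_stop() (setup, bind
and unbind callbacks present, max_speed at least USB_SPEED_FULL).

#include <linux/module.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

static int example_bind(struct usb_gadget *gadget)
{
	/* a real driver would allocate its configurations here */
	return 0;
}

static void example_unbind(struct usb_gadget *gadget)
{
}

static int example_setup(struct usb_gadget *gadget,
			 const struct usb_ctrlrequest *ctrl)
{
	/* no class/vendor requests handled in this stub */
	return -EOPNOTSUPP;
}

static void example_disconnect(struct usb_gadget *gadget)
{
}

static struct usb_gadget_driver example_driver = {
	.function	= "example",
	.max_speed	= USB_SPEED_HIGH,
	.unbind		= example_unbind,
	.setup		= example_setup,
	.disconnect	= example_disconnect,
	.driver		= {
		.name	= "example_gadget",
		.owner	= THIS_MODULE,
	},
};

static int __init example_init(void)
{
	/* reaches fusb300_udc_start(&example_driver, example_bind) */
	return usb_gadget_probe_driver(&example_driver, example_bind);
}
module_init(example_init);

static void __exit example_exit(void)
{
	usb_gadget_unregister_driver(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");
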
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fusb300_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fusb300_udc.h
new file mode 100644
index 0000000..92745bd
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/fusb300_udc.h
@@ -0,0 +1,677 @@
+/*
+ * Fusb300 UDC (USB gadget)
+ *
+ * Copyright (C) 2010 Faraday Technology Corp.
+ *
+ * Author : Yuan-hsin Chen <yhchen@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+
+#ifndef __FUSB300_UDC_H__
+#define __FUSB300_UDC_H__
+
+#include <linux/kernel.h>
+
+#define FUSB300_OFFSET_GCR		0x00
+#define FUSB300_OFFSET_GTM		0x04
+#define FUSB300_OFFSET_DAR		0x08
+#define FUSB300_OFFSET_CSR		0x0C
+#define FUSB300_OFFSET_CXPORT		0x10
+#define FUSB300_OFFSET_EPSET0(n)	(0x20 + (n - 1) * 0x30)
+#define FUSB300_OFFSET_EPSET1(n)	(0x24 + (n - 1) * 0x30)
+#define FUSB300_OFFSET_EPSET2(n)	(0x28 + (n - 1) * 0x30)
+#define FUSB300_OFFSET_EPFFR(n)		(0x2c + (n - 1) * 0x30)
+#define FUSB300_OFFSET_EPSTRID(n)	(0x40 + (n - 1) * 0x30)
+#define FUSB300_OFFSET_HSPTM		0x300
+#define FUSB300_OFFSET_HSCR		0x304
+#define FUSB300_OFFSET_SSCR0		0x308
+#define FUSB300_OFFSET_SSCR1		0x30C
+#define FUSB300_OFFSET_TT		0x310
+#define FUSB300_OFFSET_DEVNOTF		0x314
+#define FUSB300_OFFSET_DNC1		0x318
+#define FUSB300_OFFSET_CS		0x31C
+#define FUSB300_OFFSET_SOF		0x324
+#define FUSB300_OFFSET_EFCS		0x328
+#define FUSB300_OFFSET_IGR0		0x400
+#define FUSB300_OFFSET_IGR1		0x404
+#define FUSB300_OFFSET_IGR2		0x408
+#define FUSB300_OFFSET_IGR3		0x40C
+#define FUSB300_OFFSET_IGR4		0x410
+#define FUSB300_OFFSET_IGR5		0x414
+#define FUSB300_OFFSET_IGER0		0x420
+#define FUSB300_OFFSET_IGER1		0x424
+#define FUSB300_OFFSET_IGER2		0x428
+#define FUSB300_OFFSET_IGER3		0x42C
+#define FUSB300_OFFSET_IGER4		0x430
+#define FUSB300_OFFSET_IGER5		0x434
+#define FUSB300_OFFSET_DMAHMER		0x500
+#define FUSB300_OFFSET_EPPRDRDY		0x504
+#define FUSB300_OFFSET_DMAEPMR		0x508
+#define FUSB300_OFFSET_DMAENR		0x50C
+#define FUSB300_OFFSET_DMAAPR		0x510
+#define FUSB300_OFFSET_AHBCR		0x514
+#define FUSB300_OFFSET_EPPRD_W0(n)	(0x520 + (n - 1) * 0x10)
+#define FUSB300_OFFSET_EPPRD_W1(n)	(0x524 + (n - 1) * 0x10)
+#define FUSB300_OFFSET_EPPRD_W2(n)	(0x528 + (n - 1) * 0x10)
+#define FUSB300_OFFSET_EPRD_PTR(n)	(0x52C + (n - 1) * 0x10)
+#define FUSB300_OFFSET_BUFDBG_START	0x800
+#define FUSB300_OFFSET_BUFDBG_END	0xBFC
+#define FUSB300_OFFSET_EPPORT(n)	(0x1010 + (n - 1) * 0x10)
+
+/*
+ * *	Global Control Register (offset = 000H)
+ * */
+#define FUSB300_GCR_SF_RST		(1 << 8)
+#define FUSB300_GCR_VBUS_STATUS		(1 << 7)
+#define FUSB300_GCR_FORCE_HS_SUSP	(1 << 6)
+#define FUSB300_GCR_SYNC_FIFO1_CLR	(1 << 5)
+#define FUSB300_GCR_SYNC_FIFO0_CLR	(1 << 4)
+#define FUSB300_GCR_FIFOCLR		(1 << 3)
+#define FUSB300_GCR_GLINTEN		(1 << 2)
+#define FUSB300_GCR_DEVEN_FS		0x3
+#define FUSB300_GCR_DEVEN_HS		0x2
+#define FUSB300_GCR_DEVEN_SS		0x1
+#define FUSB300_GCR_DEVDIS		0x0
+#define FUSB300_GCR_DEVEN_MSK		0x3
+
+
+/*
+ * *Global Test Mode (offset = 004H)
+ * */
+#define FUSB300_GTM_TST_DIS_SOFGEN	(1 << 16)
+#define FUSB300_GTM_TST_CUR_EP_ENTRY(n)	((n & 0xF) << 12)
+#define FUSB300_GTM_TST_EP_ENTRY(n)	((n & 0xF) << 8)
+#define FUSB300_GTM_TST_EP_NUM(n)	((n & 0xF) << 4)
+#define FUSB300_GTM_TST_FIFO_DEG	(1 << 1)
+#define FUSB300_GTM_TSTMODE		(1 << 0)
+
+/*
+ * * Device Address Register (offset = 008H)
+ * */
+#define FUSB300_DAR_SETCONFG	(1 << 7)
+#define FUSB300_DAR_DRVADDR(x)	(x & 0x7F)
+#define FUSB300_DAR_DRVADDR_MSK	0x7F
+
+/*
+ * *Control Transfer Configuration and Status Register
+ * (CX_Config_Status, offset = 00CH)
+ * */
+#define FUSB300_CSR_LEN(x)	((x & 0xFFFF) << 8)
+#define FUSB300_CSR_LEN_MSK	(0xFFFF << 8)
+#define FUSB300_CSR_EMP		(1 << 4)
+#define FUSB300_CSR_FUL		(1 << 3)
+#define FUSB300_CSR_CLR		(1 << 2)
+#define FUSB300_CSR_STL		(1 << 1)
+#define FUSB300_CSR_DONE	(1 << 0)
+
+/*
+ * * EPn Setting 0 (EPn_SET0, offset = 020H+(n-1)*30H, n=1~15 )
+ * */
+#define FUSB300_EPSET0_CLRSEQNUM	(1 << 2)
+#define FUSB300_EPSET0_EPn_TX0BYTE	(1 << 1)
+#define FUSB300_EPSET0_STL		(1 << 0)
+
+/*
+ * * EPn Setting 1 (EPn_SET1, offset = 024H+(n-1)*30H, n=1~15)
+ * */
+#define FUSB300_EPSET1_START_ENTRY(x)	((x & 0xFF) << 24)
+#define FUSB300_EPSET1_START_ENTRY_MSK	(0xFF << 24)
+#define FUSB300_EPSET1_FIFOENTRY(x)	((x & 0x1F) << 12)
+#define FUSB300_EPSET1_FIFOENTRY_MSK	(0x1f << 12)
+#define FUSB300_EPSET1_INTERVAL(x)	((x & 0x7) << 6)
+#define FUSB300_EPSET1_BWNUM(x)		((x & 0x3) << 4)
+#define FUSB300_EPSET1_TYPEISO		(1 << 2)
+#define FUSB300_EPSET1_TYPEBLK		(2 << 2)
+#define FUSB300_EPSET1_TYPEINT		(3 << 2)
+#define FUSB300_EPSET1_TYPE(x)		((x & 0x3) << 2)
+#define FUSB300_EPSET1_TYPE_MSK		(0x3 << 2)
+#define FUSB300_EPSET1_DIROUT		(0 << 1)
+#define FUSB300_EPSET1_DIRIN		(1 << 1)
+#define FUSB300_EPSET1_DIR(x)		((x & 0x1) << 1)
+#define FUSB300_EPSET1_DIR_MSK		((0x1) << 1)
+#define FUSB300_EPSET1_ACTDIS		0
+#define FUSB300_EPSET1_ACTEN		1
+
+/*
+ * *EPn Setting 2 (EPn_SET2, offset = 028H+(n-1)*30H, n=1~15)
+ * */
+#define FUSB300_EPSET2_ADDROFS(x)	((x & 0x7FFF) << 16)
+#define FUSB300_EPSET2_ADDROFS_MSK	(0x7fff << 16)
+#define FUSB300_EPSET2_MPS(x)		(x & 0x7FF)
+#define FUSB300_EPSET2_MPS_MSK		0x7FF
+
+/*
+ * * EPn FIFO Register (offset = 2cH+(n-1)*30H)
+ * */
+#define FUSB300_FFR_RST		(1 << 31)
+#define FUSB300_FF_FUL		(1 << 30)
+#define FUSB300_FF_EMPTY	(1 << 29)
+#define FUSB300_FFR_BYCNT	0x1FFFF
+
+/*
+ * *EPn Stream ID (EPn_STR_ID, offset = 040H+(n-1)*30H, n=1~15)
+ * */
+#define FUSB300_STRID_STREN	(1 << 16)
+#define FUSB300_STRID_STRID(x)	(x & 0xFFFF)
+
+/*
+ * *HS PHY Test Mode (offset = 300H)
+ * */
+#define FUSB300_HSPTM_TSTPKDONE		(1 << 4)
+#define FUSB300_HSPTM_TSTPKT		(1 << 3)
+#define FUSB300_HSPTM_TSTSET0NAK	(1 << 2)
+#define FUSB300_HSPTM_TSTKSTA		(1 << 1)
+#define FUSB300_HSPTM_TSTJSTA		(1 << 0)
+
+/*
+ * *HS Control Register (offset = 304H)
+ * */
+#define FUSB300_HSCR_HS_LPM_PERMIT	(1 << 8)
+#define FUSB300_HSCR_HS_LPM_RMWKUP	(1 << 7)
+#define FUSB300_HSCR_CAP_LPM_RMWKUP	(1 << 6)
+#define FUSB300_HSCR_HS_GOSUSP		(1 << 5)
+#define FUSB300_HSCR_HS_GORMWKU		(1 << 4)
+#define FUSB300_HSCR_CAP_RMWKUP		(1 << 3)
+#define FUSB300_HSCR_IDLECNT_0MS	0
+#define FUSB300_HSCR_IDLECNT_1MS	1
+#define FUSB300_HSCR_IDLECNT_2MS	2
+#define FUSB300_HSCR_IDLECNT_3MS	3
+#define FUSB300_HSCR_IDLECNT_4MS	4
+#define FUSB300_HSCR_IDLECNT_5MS	5
+#define FUSB300_HSCR_IDLECNT_6MS	6
+#define FUSB300_HSCR_IDLECNT_7MS	7
+
+/*
+ * * SS Controller Register 0 (offset = 308H)
+ * */
+#define FUSB300_SSCR0_MAX_INTERVAL(x)	((x & 0x7) << 4)
+#define FUSB300_SSCR0_U2_FUN_EN		(1 << 1)
+#define FUSB300_SSCR0_U1_FUN_EN		(1 << 0)
+
+/*
+ * * SS Controller Register 1 (offset = 30CH)
+ * */
+#define FUSB300_SSCR1_GO_U3_DONE	(1 << 8)
+#define FUSB300_SSCR1_TXDEEMPH_LEVEL	(1 << 7)
+#define FUSB300_SSCR1_DIS_SCRMB		(1 << 6)
+#define FUSB300_SSCR1_FORCE_RECOVERY	(1 << 5)
+#define FUSB300_SSCR1_U3_WAKEUP_EN	(1 << 4)
+#define FUSB300_SSCR1_U2_EXIT_EN	(1 << 3)
+#define FUSB300_SSCR1_U1_EXIT_EN	(1 << 2)
+#define FUSB300_SSCR1_U2_ENTRY_EN	(1 << 1)
+#define FUSB300_SSCR1_U1_ENTRY_EN	(1 << 0)
+
+/*
+ * *SS Controller Register 2  (offset = 310H)
+ * */
+#define FUSB300_SSCR2_SS_TX_SWING		(1 << 25)
+#define FUSB300_SSCR2_FORCE_LINKPM_ACCEPT	(1 << 24)
+#define FUSB300_SSCR2_U2_INACT_TIMEOUT(x)	((x & 0xFF) << 16)
+#define FUSB300_SSCR2_U1TIMEOUT(x)		((x & 0xFF) << 8)
+#define FUSB300_SSCR2_U2TIMEOUT(x)		(x & 0xFF)
+
+/*
+ * *SS Device Notification Control (DEV_NOTF, offset = 314H)
+ * */
+#define FUSB300_DEVNOTF_CONTEXT0(x)		((x & 0xFFFFFF) << 8)
+#define FUSB300_DEVNOTF_TYPE_DIS		0
+#define FUSB300_DEVNOTF_TYPE_FUNCWAKE		1
+#define FUSB300_DEVNOTF_TYPE_LTM		2
+#define FUSB300_DEVNOTF_TYPE_BUSINT_ADJMSG	3
+
+/*
+ * *BFM Arbiter Priority Register (BFM_ARB offset = 31CH)
+ * */
+#define FUSB300_BFMARB_ARB_M1	(1 << 3)
+#define FUSB300_BFMARB_ARB_M0	(1 << 2)
+#define FUSB300_BFMARB_ARB_S1	(1 << 1)
+#define FUSB300_BFMARB_ARB_S0	1
+
+/*
+ * *Vendor Specific IO Control Register (offset = 320H)
+ * */
+#define FUSB300_VSIC_VCTLOAD_N	(1 << 8)
+#define FUSB300_VSIC_VCTL(x)	(x & 0x3F)
+
+/*
+ * *SOF Mask Timer (offset = 324H)
+ * */
+#define FUSB300_SOF_MASK_TIMER_HS	0x044c
+#define FUSB300_SOF_MASK_TIMER_FS	0x2710
+
+/*
+ * *Error Flag and Control Status (offset = 328H)
+ * */
+#define FUSB300_EFCS_PM_STATE_U3	3
+#define FUSB300_EFCS_PM_STATE_U2	2
+#define FUSB300_EFCS_PM_STATE_U1	1
+#define FUSB300_EFCS_PM_STATE_U0	0
+
+/*
+ * *Interrupt Group 0 Register (offset = 400H)
+ * */
+#define FUSB300_IGR0_EP15_PRD_INT	(1 << 31)
+#define FUSB300_IGR0_EP14_PRD_INT	(1 << 30)
+#define FUSB300_IGR0_EP13_PRD_INT	(1 << 29)
+#define FUSB300_IGR0_EP12_PRD_INT	(1 << 28)
+#define FUSB300_IGR0_EP11_PRD_INT	(1 << 27)
+#define FUSB300_IGR0_EP10_PRD_INT	(1 << 26)
+#define FUSB300_IGR0_EP9_PRD_INT	(1 << 25)
+#define FUSB300_IGR0_EP8_PRD_INT	(1 << 24)
+#define FUSB300_IGR0_EP7_PRD_INT	(1 << 23)
+#define FUSB300_IGR0_EP6_PRD_INT	(1 << 22)
+#define FUSB300_IGR0_EP5_PRD_INT	(1 << 21)
+#define FUSB300_IGR0_EP4_PRD_INT	(1 << 20)
+#define FUSB300_IGR0_EP3_PRD_INT	(1 << 19)
+#define FUSB300_IGR0_EP2_PRD_INT	(1 << 18)
+#define FUSB300_IGR0_EP1_PRD_INT	(1 << 17)
+#define FUSB300_IGR0_EPn_PRD_INT(n)	(1 << (n + 16))
+
+#define FUSB300_IGR0_EP15_FIFO_INT	(1 << 15)
+#define FUSB300_IGR0_EP14_FIFO_INT	(1 << 14)
+#define FUSB300_IGR0_EP13_FIFO_INT	(1 << 13)
+#define FUSB300_IGR0_EP12_FIFO_INT	(1 << 12)
+#define FUSB300_IGR0_EP11_FIFO_INT	(1 << 11)
+#define FUSB300_IGR0_EP10_FIFO_INT	(1 << 10)
+#define FUSB300_IGR0_EP9_FIFO_INT	(1 << 9)
+#define FUSB300_IGR0_EP8_FIFO_INT	(1 << 8)
+#define FUSB300_IGR0_EP7_FIFO_INT	(1 << 7)
+#define FUSB300_IGR0_EP6_FIFO_INT	(1 << 6)
+#define FUSB300_IGR0_EP5_FIFO_INT	(1 << 5)
+#define FUSB300_IGR0_EP4_FIFO_INT	(1 << 4)
+#define FUSB300_IGR0_EP3_FIFO_INT	(1 << 3)
+#define FUSB300_IGR0_EP2_FIFO_INT	(1 << 2)
+#define FUSB300_IGR0_EP1_FIFO_INT	(1 << 1)
+#define FUSB300_IGR0_EPn_FIFO_INT(n)	(1 << n)
+
+/*
+ * *Interrupt Group 1 Register (offset = 404H)
+ * */
+#define FUSB300_IGR1_INTGRP5		(1 << 31)
+#define FUSB300_IGR1_VBUS_CHG_INT	(1 << 30)
+#define FUSB300_IGR1_SYNF1_EMPTY_INT	(1 << 29)
+#define FUSB300_IGR1_SYNF0_EMPTY_INT	(1 << 28)
+#define FUSB300_IGR1_U3_EXIT_FAIL_INT	(1 << 27)
+#define FUSB300_IGR1_U2_EXIT_FAIL_INT	(1 << 26)
+#define FUSB300_IGR1_U1_EXIT_FAIL_INT	(1 << 25)
+#define FUSB300_IGR1_U2_ENTRY_FAIL_INT	(1 << 24)
+#define FUSB300_IGR1_U1_ENTRY_FAIL_INT	(1 << 23)
+#define FUSB300_IGR1_U3_EXIT_INT	(1 << 22)
+#define FUSB300_IGR1_U2_EXIT_INT	(1 << 21)
+#define FUSB300_IGR1_U1_EXIT_INT	(1 << 20)
+#define FUSB300_IGR1_U3_ENTRY_INT	(1 << 19)
+#define FUSB300_IGR1_U2_ENTRY_INT	(1 << 18)
+#define FUSB300_IGR1_U1_ENTRY_INT	(1 << 17)
+#define FUSB300_IGR1_HOT_RST_INT	(1 << 16)
+#define FUSB300_IGR1_WARM_RST_INT	(1 << 15)
+#define FUSB300_IGR1_RESM_INT		(1 << 14)
+#define FUSB300_IGR1_SUSP_INT		(1 << 13)
+#define FUSB300_IGR1_HS_LPM_INT		(1 << 12)
+#define FUSB300_IGR1_USBRST_INT		(1 << 11)
+#define FUSB300_IGR1_DEV_MODE_CHG_INT	(1 << 9)
+#define FUSB300_IGR1_CX_COMABT_INT	(1 << 8)
+#define FUSB300_IGR1_CX_COMFAIL_INT	(1 << 7)
+#define FUSB300_IGR1_CX_CMDEND_INT	(1 << 6)
+#define FUSB300_IGR1_CX_OUT_INT		(1 << 5)
+#define FUSB300_IGR1_CX_IN_INT		(1 << 4)
+#define FUSB300_IGR1_CX_SETUP_INT	(1 << 3)
+#define FUSB300_IGR1_INTGRP4		(1 << 2)
+#define FUSB300_IGR1_INTGRP3		(1 << 1)
+#define FUSB300_IGR1_INTGRP2		(1 << 0)
+
+/*
+ * *Interrupt Group 2 Register (offset = 408H)
+ * */
+#define FUSB300_IGR2_EP6_STR_ACCEPT_INT		(1 << 29)
+#define FUSB300_IGR2_EP6_STR_RESUME_INT		(1 << 28)
+#define FUSB300_IGR2_EP6_STR_REQ_INT		(1 << 27)
+#define FUSB300_IGR2_EP6_STR_NOTRDY_INT		(1 << 26)
+#define FUSB300_IGR2_EP6_STR_PRIME_INT		(1 << 25)
+#define FUSB300_IGR2_EP5_STR_ACCEPT_INT		(1 << 24)
+#define FUSB300_IGR2_EP5_STR_RESUME_INT		(1 << 23)
+#define FUSB300_IGR2_EP5_STR_REQ_INT		(1 << 22)
+#define FUSB300_IGR2_EP5_STR_NOTRDY_INT		(1 << 21)
+#define FUSB300_IGR2_EP5_STR_PRIME_INT		(1 << 20)
+#define FUSB300_IGR2_EP4_STR_ACCEPT_INT		(1 << 19)
+#define FUSB300_IGR2_EP4_STR_RESUME_INT		(1 << 18)
+#define FUSB300_IGR2_EP4_STR_REQ_INT		(1 << 17)
+#define FUSB300_IGR2_EP4_STR_NOTRDY_INT		(1 << 16)
+#define FUSB300_IGR2_EP4_STR_PRIME_INT		(1 << 15)
+#define FUSB300_IGR2_EP3_STR_ACCEPT_INT		(1 << 14)
+#define FUSB300_IGR2_EP3_STR_RESUME_INT		(1 << 13)
+#define FUSB300_IGR2_EP3_STR_REQ_INT		(1 << 12)
+#define FUSB300_IGR2_EP3_STR_NOTRDY_INT		(1 << 11)
+#define FUSB300_IGR2_EP3_STR_PRIME_INT		(1 << 10)
+#define FUSB300_IGR2_EP2_STR_ACCEPT_INT		(1 << 9)
+#define FUSB300_IGR2_EP2_STR_RESUME_INT		(1 << 8)
+#define FUSB300_IGR2_EP2_STR_REQ_INT		(1 << 7)
+#define FUSB300_IGR2_EP2_STR_NOTRDY_INT		(1 << 6)
+#define FUSB300_IGR2_EP2_STR_PRIME_INT		(1 << 5)
+#define FUSB300_IGR2_EP1_STR_ACCEPT_INT		(1 << 4)
+#define FUSB300_IGR2_EP1_STR_RESUME_INT		(1 << 3)
+#define FUSB300_IGR2_EP1_STR_REQ_INT		(1 << 2)
+#define FUSB300_IGR2_EP1_STR_NOTRDY_INT		(1 << 1)
+#define FUSB300_IGR2_EP1_STR_PRIME_INT		(1 << 0)
+
+#define FUSB300_IGR2_EP_STR_ACCEPT_INT(n)	(1 << (5 * n - 1))
+#define FUSB300_IGR2_EP_STR_RESUME_INT(n)	(1 << (5 * n - 2))
+#define FUSB300_IGR2_EP_STR_REQ_INT(n)		(1 << (5 * n - 3))
+#define FUSB300_IGR2_EP_STR_NOTRDY_INT(n)	(1 << (5 * n - 4))
+#define FUSB300_IGR2_EP_STR_PRIME_INT(n)	(1 << (5 * n - 5))
+
+/*
+ * *Interrupt Group 3 Register (offset = 40CH)
+ * */
+#define FUSB300_IGR3_EP12_STR_ACCEPT_INT	(1 << 29)
+#define FUSB300_IGR3_EP12_STR_RESUME_INT	(1 << 28)
+#define FUSB300_IGR3_EP12_STR_REQ_INT		(1 << 27)
+#define FUSB300_IGR3_EP12_STR_NOTRDY_INT	(1 << 26)
+#define FUSB300_IGR3_EP12_STR_PRIME_INT		(1 << 25)
+#define FUSB300_IGR3_EP11_STR_ACCEPT_INT	(1 << 24)
+#define FUSB300_IGR3_EP11_STR_RESUME_INT	(1 << 23)
+#define FUSB300_IGR3_EP11_STR_REQ_INT		(1 << 22)
+#define FUSB300_IGR3_EP11_STR_NOTRDY_INT	(1 << 21)
+#define FUSB300_IGR3_EP11_STR_PRIME_INT		(1 << 20)
+#define FUSB300_IGR3_EP10_STR_ACCEPT_INT	(1 << 19)
+#define FUSB300_IGR3_EP10_STR_RESUME_INT	(1 << 18)
+#define FUSB300_IGR3_EP10_STR_REQ_INT		(1 << 17)
+#define FUSB300_IGR3_EP10_STR_NOTRDY_INT	(1 << 16)
+#define FUSB300_IGR3_EP10_STR_PRIME_INT		(1 << 15)
+#define FUSB300_IGR3_EP9_STR_ACCEPT_INT		(1 << 14)
+#define FUSB300_IGR3_EP9_STR_RESUME_INT		(1 << 13)
+#define FUSB300_IGR3_EP9_STR_REQ_INT		(1 << 12)
+#define FUSB300_IGR3_EP9_STR_NOTRDY_INT		(1 << 11)
+#define FUSB300_IGR3_EP9_STR_PRIME_INT		(1 << 10)
+#define FUSB300_IGR3_EP8_STR_ACCEPT_INT		(1 << 9)
+#define FUSB300_IGR3_EP8_STR_RESUME_INT		(1 << 8)
+#define FUSB300_IGR3_EP8_STR_REQ_INT		(1 << 7)
+#define FUSB300_IGR3_EP8_STR_NOTRDY_INT		(1 << 6)
+#define FUSB300_IGR3_EP8_STR_PRIME_INT		(1 << 5)
+#define FUSB300_IGR3_EP7_STR_ACCEPT_INT		(1 << 4)
+#define FUSB300_IGR3_EP7_STR_RESUME_INT		(1 << 3)
+#define FUSB300_IGR3_EP7_STR_REQ_INT		(1 << 2)
+#define FUSB300_IGR3_EP7_STR_NOTRDY_INT		(1 << 1)
+#define FUSB300_IGR3_EP7_STR_PRIME_INT		(1 << 0)
+
+#define FUSB300_IGR3_EP_STR_ACCEPT_INT(n)	(1 << (5 * (n - 6) - 1))
+#define FUSB300_IGR3_EP_STR_RESUME_INT(n)	(1 << (5 * (n - 6) - 2))
+#define FUSB300_IGR3_EP_STR_REQ_INT(n)		(1 << (5 * (n - 6) - 3))
+#define FUSB300_IGR3_EP_STR_NOTRDY_INT(n)	(1 << (5 * (n - 6) - 4))
+#define FUSB300_IGR3_EP_STR_PRIME_INT(n)	(1 << (5 * (n - 6) - 5))
+
+/*
+ * *Interrupt Group 4 Register (offset = 410H)
+ * */
+#define FUSB300_IGR4_EP15_RX0_INT		(1 << 31)
+#define FUSB300_IGR4_EP14_RX0_INT		(1 << 30)
+#define FUSB300_IGR4_EP13_RX0_INT		(1 << 29)
+#define FUSB300_IGR4_EP12_RX0_INT		(1 << 28)
+#define FUSB300_IGR4_EP11_RX0_INT		(1 << 27)
+#define FUSB300_IGR4_EP10_RX0_INT		(1 << 26)
+#define FUSB300_IGR4_EP9_RX0_INT		(1 << 25)
+#define FUSB300_IGR4_EP8_RX0_INT		(1 << 24)
+#define FUSB300_IGR4_EP7_RX0_INT		(1 << 23)
+#define FUSB300_IGR4_EP6_RX0_INT		(1 << 22)
+#define FUSB300_IGR4_EP5_RX0_INT		(1 << 21)
+#define FUSB300_IGR4_EP4_RX0_INT		(1 << 20)
+#define FUSB300_IGR4_EP3_RX0_INT		(1 << 19)
+#define FUSB300_IGR4_EP2_RX0_INT		(1 << 18)
+#define FUSB300_IGR4_EP1_RX0_INT		(1 << 17)
+#define FUSB300_IGR4_EP_RX0_INT(x)		(1 << (x + 16))
+#define FUSB300_IGR4_EP15_STR_ACCEPT_INT	(1 << 14)
+#define FUSB300_IGR4_EP15_STR_RESUME_INT	(1 << 13)
+#define FUSB300_IGR4_EP15_STR_REQ_INT		(1 << 12)
+#define FUSB300_IGR4_EP15_STR_NOTRDY_INT	(1 << 11)
+#define FUSB300_IGR4_EP15_STR_PRIME_INT		(1 << 10)
+#define FUSB300_IGR4_EP14_STR_ACCEPT_INT	(1 << 9)
+#define FUSB300_IGR4_EP14_STR_RESUME_INT	(1 << 8)
+#define FUSB300_IGR4_EP14_STR_REQ_INT		(1 << 7)
+#define FUSB300_IGR4_EP14_STR_NOTRDY_INT	(1 << 6)
+#define FUSB300_IGR4_EP14_STR_PRIME_INT		(1 << 5)
+#define FUSB300_IGR4_EP13_STR_ACCEPT_INT	(1 << 4)
+#define FUSB300_IGR4_EP13_STR_RESUME_INT	(1 << 3)
+#define FUSB300_IGR4_EP13_STR_REQ_INT		(1 << 2)
+#define FUSB300_IGR4_EP13_STR_NOTRDY_INT	(1 << 1)
+#define FUSB300_IGR4_EP13_STR_PRIME_INT		(1 << 0)
+
+#define FUSB300_IGR4_EP_STR_ACCEPT_INT(n)	(1 << (5 * (n - 12) - 1))
+#define FUSB300_IGR4_EP_STR_RESUME_INT(n)	(1 << (5 * (n - 12) - 2))
+#define FUSB300_IGR4_EP_STR_REQ_INT(n)		(1 << (5 * (n - 12) - 3))
+#define FUSB300_IGR4_EP_STR_NOTRDY_INT(n)	(1 << (5 * (n - 12) - 4))
+#define FUSB300_IGR4_EP_STR_PRIME_INT(n)	(1 << (5 * (n - 12) - 5))
+
+/*
+ * *Interrupt Group 5 Register (offset = 414H)
+ * */
+#define FUSB300_IGR5_EP_STL_INT(n)	(1 << n)
+
+/*
+ * *Interrupt Enable Group 0 Register (offset = 420H)
+ * */
+#define FUSB300_IGER0_EEP15_PRD_INT	(1 << 31)
+#define FUSB300_IGER0_EEP14_PRD_INT	(1 << 30)
+#define FUSB300_IGER0_EEP13_PRD_INT	(1 << 29)
+#define FUSB300_IGER0_EEP12_PRD_INT	(1 << 28)
+#define FUSB300_IGER0_EEP11_PRD_INT	(1 << 27)
+#define FUSB300_IGER0_EEP10_PRD_INT	(1 << 26)
+#define FUSB300_IGER0_EEP9_PRD_INT	(1 << 25)
+#define FUSB300_IGER0_EEP8_PRD_INT	(1 << 24)
+#define FUSB300_IGER0_EEP7_PRD_INT	(1 << 23)
+#define FUSB300_IGER0_EEP6_PRD_INT	(1 << 22)
+#define FUSB300_IGER0_EEP5_PRD_INT	(1 << 21)
+#define FUSB300_IGER0_EEP4_PRD_INT	(1 << 20)
+#define FUSB300_IGER0_EEP3_PRD_INT	(1 << 19)
+#define FUSB300_IGER0_EEP2_PRD_INT	(1 << 18)
+#define FUSB300_IGER0_EEP1_PRD_INT	(1 << 17)
+#define FUSB300_IGER0_EEPn_PRD_INT(n)	(1 << (n + 16))
+
+#define FUSB300_IGER0_EEP15_FIFO_INT	(1 << 15)
+#define FUSB300_IGER0_EEP14_FIFO_INT	(1 << 14)
+#define FUSB300_IGER0_EEP13_FIFO_INT	(1 << 13)
+#define FUSB300_IGER0_EEP12_FIFO_INT	(1 << 12)
+#define FUSB300_IGER0_EEP11_FIFO_INT	(1 << 11)
+#define FUSB300_IGER0_EEP10_FIFO_INT	(1 << 10)
+#define FUSB300_IGER0_EEP9_FIFO_INT	(1 << 9)
+#define FUSB300_IGER0_EEP8_FIFO_INT	(1 << 8)
+#define FUSB300_IGER0_EEP7_FIFO_INT	(1 << 7)
+#define FUSB300_IGER0_EEP6_FIFO_INT	(1 << 6)
+#define FUSB300_IGER0_EEP5_FIFO_INT	(1 << 5)
+#define FUSB300_IGER0_EEP4_FIFO_INT	(1 << 4)
+#define FUSB300_IGER0_EEP3_FIFO_INT	(1 << 3)
+#define FUSB300_IGER0_EEP2_FIFO_INT	(1 << 2)
+#define FUSB300_IGER0_EEP1_FIFO_INT	(1 << 1)
+#define FUSB300_IGER0_EEPn_FIFO_INT(n)	(1 << n)
+
+/*
+ * *Interrupt Enable Group 1 Register (offset = 424H)
+ * */
+#define FUSB300_IGER1_EINT_GRP5		(1 << 31)
+#define FUSB300_IGER1_VBUS_CHG_INT	(1 << 30)
+#define FUSB300_IGER1_SYNF1_EMPTY_INT	(1 << 29)
+#define FUSB300_IGER1_SYNF0_EMPTY_INT	(1 << 28)
+#define FUSB300_IGER1_U3_EXIT_FAIL_INT	(1 << 27)
+#define FUSB300_IGER1_U2_EXIT_FAIL_INT	(1 << 26)
+#define FUSB300_IGER1_U1_EXIT_FAIL_INT	(1 << 25)
+#define FUSB300_IGER1_U2_ENTRY_FAIL_INT	(1 << 24)
+#define FUSB300_IGER1_U1_ENTRY_FAIL_INT	(1 << 23)
+#define FUSB300_IGER1_U3_EXIT_INT	(1 << 22)
+#define FUSB300_IGER1_U2_EXIT_INT	(1 << 21)
+#define FUSB300_IGER1_U1_EXIT_INT	(1 << 20)
+#define FUSB300_IGER1_U3_ENTRY_INT	(1 << 19)
+#define FUSB300_IGER1_U2_ENTRY_INT	(1 << 18)
+#define FUSB300_IGER1_U1_ENTRY_INT	(1 << 17)
+#define FUSB300_IGER1_HOT_RST_INT	(1 << 16)
+#define FUSB300_IGER1_WARM_RST_INT	(1 << 15)
+#define FUSB300_IGER1_RESM_INT		(1 << 14)
+#define FUSB300_IGER1_SUSP_INT		(1 << 13)
+#define FUSB300_IGER1_LPM_INT		(1 << 12)
+#define FUSB300_IGER1_HS_RST_INT	(1 << 11)
+#define FUSB300_IGER1_EDEV_MODE_CHG_INT	(1 << 9)
+#define FUSB300_IGER1_CX_COMABT_INT	(1 << 8)
+#define FUSB300_IGER1_CX_COMFAIL_INT	(1 << 7)
+#define FUSB300_IGER1_CX_CMDEND_INT	(1 << 6)
+#define FUSB300_IGER1_CX_OUT_INT	(1 << 5)
+#define FUSB300_IGER1_CX_IN_INT		(1 << 4)
+#define FUSB300_IGER1_CX_SETUP_INT	(1 << 3)
+#define FUSB300_IGER1_INTGRP4		(1 << 2)
+#define FUSB300_IGER1_INTGRP3		(1 << 1)
+#define FUSB300_IGER1_INTGRP2		(1 << 0)
+
+/*
+ * *Interrupt Enable Group 2 Register (offset = 428H)
+ * */
+#define FUSB300_IGER2_EEP_STR_ACCEPT_INT(n)	(1 << (5 * n - 1))
+#define FUSB300_IGER2_EEP_STR_RESUME_INT(n)	(1 << (5 * n - 2))
+#define FUSB300_IGER2_EEP_STR_REQ_INT(n)	(1 << (5 * n - 3))
+#define FUSB300_IGER2_EEP_STR_NOTRDY_INT(n)	(1 << (5 * n - 4))
+#define FUSB300_IGER2_EEP_STR_PRIME_INT(n)	(1 << (5 * n - 5))
+
+/*
+ * *Interrupt Enable Group 3 Register (offset = 42CH)
+ * */
+
+#define FUSB300_IGER3_EEP_STR_ACCEPT_INT(n)	(1 << (5 * (n - 6) - 1))
+#define FUSB300_IGER3_EEP_STR_RESUME_INT(n)	(1 << (5 * (n - 6) - 2))
+#define FUSB300_IGER3_EEP_STR_REQ_INT(n)	(1 << (5 * (n - 6) - 3))
+#define FUSB300_IGER3_EEP_STR_NOTRDY_INT(n)	(1 << (5 * (n - 6) - 4))
+#define FUSB300_IGER3_EEP_STR_PRIME_INT(n)	(1 << (5 * (n - 6) - 5))
+
+/*
+ * *Interrupt Enable Group 4 Register (offset = 430H)
+ * */
+
+#define FUSB300_IGER4_EEP_RX0_INT(n)		(1 << (n + 16))
+#define FUSB300_IGER4_EEP_STR_ACCEPT_INT(n)	(1 << (5 * (n - 6) - 1))
+#define FUSB300_IGER4_EEP_STR_RESUME_INT(n)	(1 << (5 * (n - 6) - 2))
+#define FUSB300_IGER4_EEP_STR_REQ_INT(n)	(1 << (5 * (n - 6) - 3))
+#define FUSB300_IGER4_EEP_STR_NOTRDY_INT(n)	(1 << (5 * (n - 6) - 4))
+#define FUSB300_IGER4_EEP_STR_PRIME_INT(n)	(1 << (5 * (n - 6) - 5))
+
+/* EP PRD Ready (EP_PRD_RDY, offset = 504H) */
+
+#define FUSB300_EPPRDR_EP15_PRD_RDY		(1 << 15)
+#define FUSB300_EPPRDR_EP14_PRD_RDY		(1 << 14)
+#define FUSB300_EPPRDR_EP13_PRD_RDY		(1 << 13)
+#define FUSB300_EPPRDR_EP12_PRD_RDY		(1 << 12)
+#define FUSB300_EPPRDR_EP11_PRD_RDY		(1 << 11)
+#define FUSB300_EPPRDR_EP10_PRD_RDY		(1 << 10)
+#define FUSB300_EPPRDR_EP9_PRD_RDY		(1 << 9)
+#define FUSB300_EPPRDR_EP8_PRD_RDY		(1 << 8)
+#define FUSB300_EPPRDR_EP7_PRD_RDY		(1 << 7)
+#define FUSB300_EPPRDR_EP6_PRD_RDY		(1 << 6)
+#define FUSB300_EPPRDR_EP5_PRD_RDY		(1 << 5)
+#define FUSB300_EPPRDR_EP4_PRD_RDY		(1 << 4)
+#define FUSB300_EPPRDR_EP3_PRD_RDY		(1 << 3)
+#define FUSB300_EPPRDR_EP2_PRD_RDY		(1 << 2)
+#define FUSB300_EPPRDR_EP1_PRD_RDY		(1 << 1)
+#define FUSB300_EPPRDR_EP_PRD_RDY(n)		(1 << n)
+
+/* AHB Bus Control Register (offset = 514H) */
+#define FUSB300_AHBBCR_S1_SPLIT_ON		(1 << 17)
+#define FUSB300_AHBBCR_S0_SPLIT_ON		(1 << 16)
+#define FUSB300_AHBBCR_S1_1entry		(0 << 12)
+#define FUSB300_AHBBCR_S1_4entry		(3 << 12)
+#define FUSB300_AHBBCR_S1_8entry		(5 << 12)
+#define FUSB300_AHBBCR_S1_16entry		(7 << 12)
+#define FUSB300_AHBBCR_S0_1entry		(0 << 8)
+#define FUSB300_AHBBCR_S0_4entry		(3 << 8)
+#define FUSB300_AHBBCR_S0_8entry		(5 << 8)
+#define FUSB300_AHBBCR_S0_16entry		(7 << 8)
+#define FUSB300_AHBBCR_M1_BURST_SINGLE		(0 << 4)
+#define FUSB300_AHBBCR_M1_BURST_INCR		(1 << 4)
+#define FUSB300_AHBBCR_M1_BURST_INCR4		(3 << 4)
+#define FUSB300_AHBBCR_M1_BURST_INCR8		(5 << 4)
+#define FUSB300_AHBBCR_M1_BURST_INCR16		(7 << 4)
+#define FUSB300_AHBBCR_M0_BURST_SINGLE		0
+#define FUSB300_AHBBCR_M0_BURST_INCR		1
+#define FUSB300_AHBBCR_M0_BURST_INCR4		3
+#define FUSB300_AHBBCR_M0_BURST_INCR8		5
+#define FUSB300_AHBBCR_M0_BURST_INCR16		7
+#define FUSB300_IGER5_EEP_STL_INT(n)		(1 << n)
+
+/* WORD 0 Data Structure of PRD Table */
+#define FUSB300_EPPRD0_M			(1 << 30)
+#define FUSB300_EPPRD0_O			(1 << 29)
+/* marks the finished PRD */
+#define FUSB300_EPPRD0_F			(1 << 28)
+#define FUSB300_EPPRD0_I			(1 << 27)
+#define FUSB300_EPPRD0_A			(1 << 26)
+/* last PRD: tell the HW to wrap back to the first PRD next time */
+#define FUSB300_EPPRD0_L			(1 << 25)
+#define FUSB300_EPPRD0_H			(1 << 24)
+#define FUSB300_EPPRD0_BTC(n)			(n & 0xFFFFFF)
+
+/*----------------------------------------------------------------------*/
+#define FUSB300_MAX_NUM_EP		16
+
+#define FUSB300_FIFO_ENTRY_NUM		8
+#define FUSB300_MAX_FIFO_ENTRY		8
+
+#define SS_CTL_MAX_PACKET_SIZE		0x200
+#define SS_BULK_MAX_PACKET_SIZE		0x400
+#define SS_INT_MAX_PACKET_SIZE		0x400
+#define SS_ISO_MAX_PACKET_SIZE		0x400
+
+#define HS_BULK_MAX_PACKET_SIZE		0x200
+#define HS_CTL_MAX_PACKET_SIZE		0x40
+#define HS_INT_MAX_PACKET_SIZE		0x400
+#define HS_ISO_MAX_PACKET_SIZE		0x400
+
+struct fusb300_ep_info {
+	u8	epnum;
+	u8	type;
+	u8	interval;
+	u8	dir_in;
+	u16	maxpacket;
+	u16	addrofs;
+	u16	bw_num;
+};
+
+struct fusb300_request {
+
+	struct usb_request	req;
+	struct list_head	queue;
+};
+
+
+struct fusb300_ep {
+	struct usb_ep		ep;
+	struct fusb300		*fusb300;
+
+	struct list_head	queue;
+	unsigned		stall:1;
+	unsigned		wedged:1;
+	unsigned		use_dma:1;
+
+	unsigned char		epnum;
+	unsigned char		type;
+	const struct usb_endpoint_descriptor	*desc;
+};
+
+struct fusb300 {
+	spinlock_t		lock;
+	void __iomem		*reg;
+
+	unsigned long		irq_trigger;
+
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+
+	struct fusb300_ep	*ep[FUSB300_MAX_NUM_EP];
+
+	struct usb_request	*ep0_req;	/* for internal request */
+	__le16			ep0_data;
+	u32			ep0_length;	/* for internal request */
+	u8			ep0_dir;	/* 0/0x80  out/in */
+
+	u8			fifo_entry_num;	/* next start fifo entry */
+	u32			addrofs;	/* next fifo address offset */
+	u8			reenum;		/* if re-enumeration */
+};
+
+#endif
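
As a quick worked example of the register map above, a hedged compile-time
sanity check (the helper name is made up and is not part of the header): the
EPn_SET block uses a 0x30-byte stride starting at 0x20, the PRD words a
0x10-byte stride starting at 0x520, and the generic IGR2 stream macros
reproduce the explicit per-endpoint definitions.

#include <linux/bug.h>
#include "fusb300_udc.h"

static inline void fusb300_regmap_sanity_check(void)
{
	/* EP1's set registers start at 0x20; EP2 follows one 0x30 stride later */
	BUILD_BUG_ON(FUSB300_OFFSET_EPSET0(1) != 0x20);
	BUILD_BUG_ON(FUSB300_OFFSET_EPSET0(2) != 0x50);
	/* PRD word 0 of EP2 sits one 0x10 stride after EP1's */
	BUILD_BUG_ON(FUSB300_OFFSET_EPPRD_W0(2) != 0x530);
	/* the generic IGR2 macro matches the explicit EP1 definition */
	BUILD_BUG_ON(FUSB300_IGR2_EP_STR_ACCEPT_INT(1) !=
		     FUSB300_IGR2_EP1_STR_ACCEPT_INT);
}
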
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/g_ffs.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/g_ffs.c
new file mode 100644
index 0000000..a85eaf4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/g_ffs.c
@@ -0,0 +1,327 @@
+/*
+ * g_ffs.c -- user mode file system API for USB composite function controllers
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * Author: Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "g_ffs: " fmt
+
+#include <linux/module.h>
+#include <linux/utsname.h>
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
+#  if defined USB_ETH_RNDIS
+#    undef USB_ETH_RNDIS
+#  endif
+#  ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+#    define USB_ETH_RNDIS y
+#  endif
+
+#  include "f_ecm.c"
+#  include "f_subset.c"
+#  ifdef USB_ETH_RNDIS
+#    include "f_rndis.c"
+#    include "rndis.c"
+#  endif
+#  include "u_ether.c"
+
+static u8 gfs_hostaddr[ETH_ALEN];
+#  ifdef CONFIG_USB_FUNCTIONFS_ETH
+static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+#  endif
+#else
+#  define gether_cleanup() do { } while (0)
+#  define gether_setup(gadget, hostaddr)   ((int)0)
+#  define gfs_hostaddr NULL
+#endif
+
+#include "f_fs.c"
+
+#define DRIVER_NAME	"g_ffs"
+#define DRIVER_DESC	"USB Function Filesystem"
+#define DRIVER_VERSION	"24 Aug 2004"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Michal Nazarewicz");
+MODULE_LICENSE("GPL");
+
+#define GFS_VENDOR_ID	0x1d6b	/* Linux Foundation */
+#define GFS_PRODUCT_ID	0x0105	/* FunctionFS Gadget */
+
+static struct usb_device_descriptor gfs_dev_desc = {
+	.bLength		= sizeof gfs_dev_desc,
+	.bDescriptorType	= USB_DT_DEVICE,
+
+	.bcdUSB			= cpu_to_le16(0x0200),
+	.bDeviceClass		= USB_CLASS_PER_INTERFACE,
+
+	.idVendor		= cpu_to_le16(GFS_VENDOR_ID),
+	.idProduct		= cpu_to_le16(GFS_PRODUCT_ID),
+};
+
+module_param_named(bDeviceClass,    gfs_dev_desc.bDeviceClass,    byte,   0644);
+MODULE_PARM_DESC(bDeviceClass, "USB Device class");
+module_param_named(bDeviceSubClass, gfs_dev_desc.bDeviceSubClass, byte,   0644);
+MODULE_PARM_DESC(bDeviceSubClass, "USB Device subclass");
+module_param_named(bDeviceProtocol, gfs_dev_desc.bDeviceProtocol, byte,   0644);
+MODULE_PARM_DESC(bDeviceProtocol, "USB Device protocol");
+
+static const struct usb_descriptor_header *gfs_otg_desc[] = {
+	(const struct usb_descriptor_header *)
+	&(const struct usb_otg_descriptor) {
+		.bLength		= sizeof(struct usb_otg_descriptor),
+		.bDescriptorType	= USB_DT_OTG,
+
+		/*
+		 * REVISIT SRP-only hardware is possible, although
+		 * it would not be called "OTG" ...
+		 */
+		.bmAttributes		= USB_OTG_SRP | USB_OTG_HNP,
+	},
+
+	NULL
+};
+
+/* String IDs are assigned dynamically */
+static struct usb_string gfs_strings[] = {
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+	{ .s = "FunctionFS + RNDIS" },
+#endif
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+	{ .s = "FunctionFS + ECM" },
+#endif
+#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
+	{ .s = "FunctionFS" },
+#endif
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings *gfs_dev_strings[] = {
+	&(struct usb_gadget_strings) {
+		.language	= 0x0409,	/* en-us */
+		.strings	= gfs_strings,
+	},
+	NULL,
+};
+
+struct gfs_configuration {
+	struct usb_configuration c;
+	int (*eth)(struct usb_configuration *c, u8 *ethaddr);
+} gfs_configurations[] = {
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+	{
+		.eth		= rndis_bind_config,
+	},
+#endif
+
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+	{
+		.eth		= eth_bind_config,
+	},
+#endif
+
+#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
+	{
+	},
+#endif
+};
+
+static int gfs_bind(struct usb_composite_dev *cdev);
+static int gfs_unbind(struct usb_composite_dev *cdev);
+static int gfs_do_config(struct usb_configuration *c);
+
+static struct usb_composite_driver gfs_driver = {
+	.name		= DRIVER_NAME,
+	.dev		= &gfs_dev_desc,
+	.strings	= gfs_dev_strings,
+	.max_speed	= USB_SPEED_HIGH,
+	.unbind		= gfs_unbind,
+	.iProduct	= DRIVER_DESC,
+};
+
+static struct ffs_data *gfs_ffs_data;
+static unsigned long gfs_registered;
+
+static int __init gfs_init(void)
+{
+	ENTER();
+
+	return functionfs_init();
+}
+module_init(gfs_init);
+
+static void __exit gfs_exit(void)
+{
+	ENTER();
+
+	if (test_and_clear_bit(0, &gfs_registered))
+		usb_composite_unregister(&gfs_driver);
+
+	functionfs_cleanup();
+}
+module_exit(gfs_exit);
+
+static int functionfs_ready_callback(struct ffs_data *ffs)
+{
+	int ret;
+
+	ENTER();
+
+	if (WARN_ON(test_and_set_bit(0, &gfs_registered)))
+		return -EBUSY;
+
+	gfs_ffs_data = ffs;
+	ret = usb_composite_probe(&gfs_driver, gfs_bind);
+	if (unlikely(ret < 0))
+		clear_bit(0, &gfs_registered);
+	return ret;
+}
+
+static void functionfs_closed_callback(struct ffs_data *ffs)
+{
+	ENTER();
+
+	if (test_and_clear_bit(0, &gfs_registered))
+		usb_composite_unregister(&gfs_driver);
+}
+
+static int functionfs_check_dev_callback(const char *dev_name)
+{
+	return 0;
+}
+
+static int gfs_bind(struct usb_composite_dev *cdev)
+{
+	int ret, i;
+
+	ENTER();
+
+	if (WARN_ON(!gfs_ffs_data))
+		return -ENODEV;
+
+	ret = gether_setup(cdev->gadget, gfs_hostaddr);
+	if (unlikely(ret < 0))
+		goto error_quick;
+
+	ret = usb_string_ids_tab(cdev, gfs_strings);
+	if (unlikely(ret < 0))
+		goto error;
+
+	ret = functionfs_bind(gfs_ffs_data, cdev);
+	if (unlikely(ret < 0))
+		goto error;
+
+	for (i = 0; i < ARRAY_SIZE(gfs_configurations); ++i) {
+		struct gfs_configuration *c = gfs_configurations + i;
+
+		c->c.label			= gfs_strings[i].s;
+		c->c.iConfiguration		= gfs_strings[i].id;
+		c->c.bConfigurationValue	= 1 + i;
+		c->c.bmAttributes		= USB_CONFIG_ATT_SELFPOWER;
+
+		ret = usb_add_config(cdev, &c->c, gfs_do_config);
+		if (unlikely(ret < 0))
+			goto error_unbind;
+	}
+
+	return 0;
+
+error_unbind:
+	functionfs_unbind(gfs_ffs_data);
+error:
+	gether_cleanup();
+error_quick:
+	gfs_ffs_data = NULL;
+	return ret;
+}
+
+static int gfs_unbind(struct usb_composite_dev *cdev)
+{
+	ENTER();
+
+	/*
+	 * We may have been called in an error recovery from
+	 * composite_bind() after gfs_unbind() failure so we need to
+	 * check if gfs_ffs_data is not NULL since gfs_bind() handles
+	 * all error recovery itself.  I'd rather we weren't called
+	 * from composite on error recovery, but what are you gonna
+	 * do...?
+	 */
+	if (gfs_ffs_data) {
+		gether_cleanup();
+		functionfs_unbind(gfs_ffs_data);
+		gfs_ffs_data = NULL;
+	}
+
+	return 0;
+}
+
+static int gfs_do_config(struct usb_configuration *c)
+{
+	struct gfs_configuration *gc =
+		container_of(c, struct gfs_configuration, c);
+	int ret;
+
+	if (WARN_ON(!gfs_ffs_data))
+		return -ENODEV;
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = gfs_otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	if (gc->eth) {
+		ret = gc->eth(c, gfs_hostaddr);
+		if (unlikely(ret < 0))
+			return ret;
+	}
+
+	ret = functionfs_bind_config(c->cdev, c, gfs_ffs_data);
+	if (unlikely(ret < 0))
+		return ret;
+
+	/*
+	 * After previous do_config calls there may be stale pointers left
+	 * in the c->interface array.  This happens whenever a user space
+	 * function with fewer interfaces is run after one that had more.
+	 * The composite layer's set_config() assumes that, if a
+	 * configuration has fewer than MAX_CONFIG_INTERFACES interfaces,
+	 * a NULL pointer follows the last interface in c->interface.
+	 * Make sure that is true.
+	 */
+	if (c->next_interface_id < ARRAY_SIZE(c->interface))
+		c->interface[c->next_interface_id] = NULL;
+
+	return 0;
+}
+
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+
+static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+{
+	return can_support_ecm(c->cdev->gadget)
+		? ecm_bind_config(c, ethaddr)
+		: geth_bind_config(c, ethaddr);
+}
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/g_zero.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/g_zero.h
new file mode 100644
index 0000000..e84b3c4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/g_zero.h
@@ -0,0 +1,25 @@
+/*
+ * This header declares the utility functions used by "Gadget Zero", plus
+ * interfaces to its two single-configuration function drivers.
+ */
+
+#ifndef __G_ZERO_H
+#define __G_ZERO_H
+
+#include <linux/usb/composite.h>
+
+/* global state */
+extern unsigned buflen;
+extern const struct usb_descriptor_header *otg_desc[];
+
+/* common utilities */
+struct usb_request *alloc_ep_req(struct usb_ep *ep);
+void free_ep_req(struct usb_ep *ep, struct usb_request *req);
+void disable_endpoints(struct usb_composite_dev *cdev,
+		struct usb_ep *in, struct usb_ep *out);
+
+/* configuration-specific linkup */
+int sourcesink_add(struct usb_composite_dev *cdev, bool autoresume);
+int loopback_add(struct usb_composite_dev *cdev, bool autoresume);
+
+#endif /* __G_ZERO_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/gadget_chips.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/gadget_chips.h
new file mode 100644
index 0000000..194b67d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/gadget_chips.h
@@ -0,0 +1,149 @@
+/*
+ * USB device controllers have lots of quirks.  Use these macros in
+ * gadget drivers or other code that needs to deal with them, and which
+ * autoconfigures instead of using early binding to the hardware.
+ *
+ * This SHOULD eventually work like the ARM mach_is_*() stuff, driven by
+ * some config file that gets updated as new hardware is supported.
+ * (And avoiding all runtime comparisons in typical one-choice configs!)
+ *
+ * NOTE:  some of these controller drivers may not be available yet.
+ * Some are available on 2.4 kernels; several are available, but not
+ * yet pushed in the 2.6 mainline tree.
+ */
+
+#ifndef __GADGET_CHIPS_H
+#define __GADGET_CHIPS_H
+
+/*
+ * NOTICE: the entries below are alphabetical and should be kept
+ * that way.
+ *
+ * Always be sure to add new entries to the correct position or
+ * accept the bashing later.
+ *
+ * If you have forgotten the alphabetical order let VIM/EMACS
+ * do that for you.
+ */
+#define gadget_is_amd5536udc(g)		(!strcmp("amd5536udc", (g)->name))
+#define gadget_is_at91(g)		(!strcmp("at91_udc", (g)->name))
+#define gadget_is_atmel_usba(g)		(!strcmp("atmel_usba_udc", (g)->name))
+#define gadget_is_ci13xxx_msm(g)	(!strcmp("ci13xxx_msm", (g)->name))
+#define gadget_is_ci13xxx_pci(g)	(!strcmp("ci13xxx_pci", (g)->name))
+#define gadget_is_dummy(g)		(!strcmp("dummy_udc", (g)->name))
+#define gadget_is_dwc3(g)		(!strcmp("dwc3-gadget", (g)->name))
+#define gadget_is_fsl_qe(g)		(!strcmp("fsl_qe_udc", (g)->name))
+#define gadget_is_fsl_usb2(g)		(!strcmp("fsl-usb2-udc", (g)->name))
+#define gadget_is_goku(g)		(!strcmp("goku_udc", (g)->name))
+#define gadget_is_imx(g)		(!strcmp("imx_udc", (g)->name))
+#define gadget_is_langwell(g)		(!strcmp("langwell_udc", (g)->name))
+#define gadget_is_m66592(g)		(!strcmp("m66592_udc", (g)->name))
+#define gadget_is_musbhdrc(g)		(!strcmp("musb-hdrc", (g)->name))
+#define gadget_is_net2272(g)		(!strcmp("net2272", (g)->name))
+#define gadget_is_net2280(g)		(!strcmp("net2280", (g)->name))
+#define gadget_is_omap(g)		(!strcmp("omap_udc", (g)->name))
+#define gadget_is_pch(g)		(!strcmp("pch_udc", (g)->name))
+#define gadget_is_pxa(g)		(!strcmp("pxa25x_udc", (g)->name))
+#define gadget_is_pxa27x(g)		(!strcmp("pxa27x_udc", (g)->name))
+#define gadget_is_r8a66597(g)		(!strcmp("r8a66597_udc", (g)->name))
+#define gadget_is_renesas_usbhs(g)	(!strcmp("renesas_usbhs_udc", (g)->name))
+#define gadget_is_s3c2410(g)		(!strcmp("s3c2410_udc", (g)->name))
+#define gadget_is_s3c_hsotg(g)		(!strcmp("s3c-hsotg", (g)->name))
+#define gadget_is_s3c_hsudc(g)		(!strcmp("s3c-hsudc", (g)->name))
+//#define gadget_is_dwc2(g)		(!strcmp("zx297510_hsotg.1", (g)->name))
+#define gadget_is_dwc2(g)		(!strcmp("dwc_otg_pcd", (g)->name))
+
+
+/**
+ * usb_gadget_controller_number - support bcdDevice id convention
+ * @gadget: the controller being driven
+ *
+ * Return a 2-digit BCD value associated with the peripheral controller,
+ * suitable for use as part of a bcdDevice value, or a negative error code.
+ *
+ * NOTE:  this convention is purely optional, and has no meaning in terms of
+ * any USB specification.  If you want to use a different convention in your
+ * gadget driver firmware -- maybe a more formal revision ID -- feel free.
+ *
+ * Hosts see these bcdDevice numbers, and are allowed (but not encouraged!)
+ * to change their behavior accordingly.  For example, it might help avoid
+ * some chip bug.
+ */
+static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
+{
+	if (gadget_is_net2280(gadget))
+		return 0x01;
+	else if (gadget_is_dummy(gadget))
+		return 0x02;
+	else if (gadget_is_pxa(gadget))
+		return 0x03;
+	else if (gadget_is_goku(gadget))
+		return 0x06;
+	else if (gadget_is_omap(gadget))
+		return 0x08;
+	else if (gadget_is_pxa27x(gadget))
+		return 0x11;
+	else if (gadget_is_s3c2410(gadget))
+		return 0x12;
+	else if (gadget_is_at91(gadget))
+		return 0x13;
+	else if (gadget_is_imx(gadget))
+		return 0x14;
+	else if (gadget_is_musbhdrc(gadget))
+		return 0x16;
+	else if (gadget_is_atmel_usba(gadget))
+		return 0x18;
+	else if (gadget_is_fsl_usb2(gadget))
+		return 0x19;
+	else if (gadget_is_amd5536udc(gadget))
+		return 0x20;
+	else if (gadget_is_m66592(gadget))
+		return 0x21;
+	else if (gadget_is_fsl_qe(gadget))
+		return 0x22;
+	else if (gadget_is_ci13xxx_pci(gadget))
+		return 0x23;
+	else if (gadget_is_langwell(gadget))
+		return 0x24;
+	else if (gadget_is_r8a66597(gadget))
+		return 0x25;
+	else if (gadget_is_s3c_hsotg(gadget))
+		return 0x26;
+	else if (gadget_is_pch(gadget))
+		return 0x27;
+	else if (gadget_is_ci13xxx_msm(gadget))
+		return 0x28;
+	else if (gadget_is_renesas_usbhs(gadget))
+		return 0x29;
+	else if (gadget_is_s3c_hsudc(gadget))
+		return 0x30;
+	else if (gadget_is_net2272(gadget))
+		return 0x31;
+	else if (gadget_is_dwc3(gadget))
+		return 0x32;
+	else if (gadget_is_dwc2(gadget))
+		return 0x33;
+
+	return -ENOENT;
+}
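+
+/*
+ * Illustrative use of the convention above (a sketch, not part of the
+ * original header): a gadget driver's bind() usually folds the controller
+ * number into bcdDevice, falling back to a recognizable dummy value when
+ * the UDC is unknown -- see midi_bind() in gmidi.c for a real user:
+ *
+ *	gcnum = usb_gadget_controller_number(gadget);
+ *	if (gcnum >= 0)
+ *		device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
+ *	else
+ *		device_desc.bcdDevice = cpu_to_le16(0x9999);
+ */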
+
+
+/**
+ * gadget_supports_altsettings - return true if altsettings work
+ * @gadget: the gadget in question
+ */
+static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
+{
+	/* PXA 21x/25x/26x has no altsettings at all */
+	if (gadget_is_pxa(gadget))
+		return false;
+
+	/* PXA 27x and 3xx have *broken* altsetting support */
+	if (gadget_is_pxa27x(gadget))
+		return false;
+
+	/* Everything else is *presumably* fine ... */
+	return true;
+}
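+
+/*
+ * Example (illustrative only): a function driver can consult the helper
+ * above before exposing descriptors that depend on alternate settings:
+ *
+ *	if (gadget_supports_altsettings(gadget))
+ *		... register the altsetting-based interface ...
+ *	else
+ *		... fall back to a single fixed setting ...
+ */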
+
+#endif /* __GADGET_CHIPS_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/gmidi.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/gmidi.c
new file mode 100644
index 0000000..681bd03
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/gmidi.c
@@ -0,0 +1,206 @@
+/*
+ * gmidi.c -- USB MIDI Gadget Driver
+ *
+ * Copyright (C) 2006 Thumtronics Pty Ltd.
+ * Developed for Thumtronics by Grey Innovation
+ * Ben Williamson <ben.williamson@greyinnovation.com>
+ *
+ * This software is distributed under the terms of the GNU General Public
+ * License ("GPL") version 2, as published by the Free Software Foundation.
+ *
+ * This code is based in part on:
+ *
+ * Gadget Zero driver, Copyright (C) 2003-2004 David Brownell.
+ * USB Audio driver, Copyright (C) 2002 by Takashi Iwai.
+ * USB MIDI driver, Copyright (C) 2002-2005 Clemens Ladisch.
+ *
+ * Refer to the USB Device Class Definition for MIDI Devices:
+ * http://www.usb.org/developers/devclass_docs/midi10.pdf
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <linux/module.h>
+#include <linux/device.h>
+
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/rawmidi.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/midi.h>
+
+#include "gadget_chips.h"
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "f_midi.c"
+
+/*-------------------------------------------------------------------------*/
+
+MODULE_AUTHOR("Ben Williamson");
+MODULE_LICENSE("GPL v2");
+
+static const char shortname[] = "g_midi";
+static const char longname[] = "MIDI Gadget";
+
+static int index = SNDRV_DEFAULT_IDX1;
+module_param(index, int, S_IRUGO);
+MODULE_PARM_DESC(index, "Index value for the USB MIDI Gadget adapter.");
+
+static char *id = SNDRV_DEFAULT_STR1;
+module_param(id, charp, S_IRUGO);
+MODULE_PARM_DESC(id, "ID string for the USB MIDI Gadget adapter.");
+
+static unsigned int buflen = 256;
+module_param(buflen, uint, S_IRUGO);
+MODULE_PARM_DESC(buflen, "MIDI buffer length");
+
+static unsigned int qlen = 32;
+module_param(qlen, uint, S_IRUGO);
+MODULE_PARM_DESC(qlen, "USB read request queue length");
+
+static unsigned int in_ports = 1;
+module_param(in_ports, uint, S_IRUGO);
+MODULE_PARM_DESC(in_ports, "Number of MIDI input ports");
+
+static unsigned int out_ports = 1;
+module_param(out_ports, uint, S_IRUGO);
+MODULE_PARM_DESC(out_ports, "Number of MIDI output ports");
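+
+/* Example (not part of the original driver): with the parameters above,
+ * a two-port gadget with larger buffers might be loaded as
+ *
+ *	modprobe g_midi in_ports=2 out_ports=2 buflen=512 qlen=64
+ */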
+
+/* Thanks to Grey Innovation for donating this product ID.
+ *
+ * DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+#define DRIVER_VENDOR_NUM	0x17b3		/* Grey Innovation */
+#define DRIVER_PRODUCT_NUM	0x0004		/* Linux-USB "MIDI Gadget" */
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+#define STRING_DESCRIPTION_IDX		2
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		USB_DT_DEVICE_SIZE,
+	.bDescriptorType =	USB_DT_DEVICE,
+	.bcdUSB =		__constant_cpu_to_le16(0x0200),
+	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
+	.idVendor =		__constant_cpu_to_le16(DRIVER_VENDOR_NUM),
+	.idProduct =		__constant_cpu_to_le16(DRIVER_PRODUCT_NUM),
+	/* .iManufacturer =	DYNAMIC */
+	/* .iProduct =		DYNAMIC */
+	.bNumConfigurations =	1,
+};
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s	= "Grey Innovation",
+	[STRING_PRODUCT_IDX].s		= "MIDI Gadget",
+	[STRING_DESCRIPTION_IDX].s	= "MIDI",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+static int __exit midi_unbind(struct usb_composite_dev *dev)
+{
+	return 0;
+}
+
+static struct usb_configuration midi_config = {
+	.label		= "MIDI Gadget",
+	.bConfigurationValue = 1,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes	= USB_CONFIG_ATT_ONE,
+	.bMaxPower	= CONFIG_USB_GADGET_VBUS_DRAW / 2,
+};
+
+static int __init midi_bind_config(struct usb_configuration *c)
+{
+	return f_midi_bind_config(c, index, id,
+				  in_ports, out_ports,
+				  buflen, qlen);
+}
+
+static int __init midi_bind(struct usb_composite_dev *cdev)
+{
+	struct usb_gadget *gadget = cdev->gadget;
+	int gcnum, status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		return status;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		return status;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	device_desc.iProduct = status;
+
+	/* config description */
+	status = usb_string_id(cdev);
+	if (status < 0)
+		return status;
+	strings_dev[STRING_DESCRIPTION_IDX].id = status;
+
+	midi_config.iConfiguration = status;
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum < 0) {
+		/* gmidi is so simple (no altsettings) that
+		 * it SHOULD NOT have problems with bulk-capable hardware.
+		 * So warn about unrecognized controllers; don't panic.
+		 */
+		pr_warning("%s: controller '%s' not recognized\n",
+			   __func__, gadget->name);
+		device_desc.bcdDevice = cpu_to_le16(0x9999);
+	} else {
+		device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
+	}
+
+	status = usb_add_config(cdev, &midi_config, midi_bind_config);
+	if (status < 0)
+		return status;
+
+	pr_info("%s\n", longname);
+	return 0;
+}
+
+static struct usb_composite_driver midi_driver = {
+	.name		= (char *) longname,
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.max_speed	= USB_SPEED_HIGH,
+	.unbind		= __exit_p(midi_unbind),
+};
+
+static int __init midi_init(void)
+{
+	return usb_composite_probe(&midi_driver, midi_bind);
+}
+module_init(midi_init);
+
+static void __exit midi_cleanup(void)
+{
+	usb_composite_unregister(&midi_driver);
+}
+module_exit(midi_cleanup);
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/goku_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/goku_udc.c
new file mode 100644
index 0000000..e151d6b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/goku_udc.c
@@ -0,0 +1,1908 @@
+/*
+ * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
+ *
+ * Copyright (C) 2000-2002 Lineo
+ *      by Stuart Lynne, Tom Rushworth, and Bruce Balden
+ * Copyright (C) 2002 Toshiba Corporation
+ * Copyright (C) 2003 MontaVista Software (source@mvista.com)
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/*
+ * This device has ep0 and three semi-configurable bulk/interrupt endpoints.
+ *
+ *  - Endpoint numbering is fixed: ep{1,2,3}-bulk
+ *  - Gadget drivers can choose ep maxpacket (8/16/32/64)
+ *  - Gadget drivers can choose direction (IN, OUT)
+ *  - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
+ */
+
+// #define	VERBOSE		/* extra debug messages (success too) */
+// #define	USB_TRACE	/* packet-level success messages */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/prefetch.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+
+#include "goku_udc.h"
+
+#define	DRIVER_DESC		"TC86C001 USB Device Controller"
+#define	DRIVER_VERSION		"30-Oct 2003"
+
+#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+static const char driver_name [] = "goku_udc";
+static const char driver_desc [] = DRIVER_DESC;
+
+MODULE_AUTHOR("source@mvista.com");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+
+/*
+ * IN dma behaves ok under testing, though the IN-dma abort paths don't
+ * seem to behave quite as expected.  Used by default.
+ *
+ * The OUT-dma documentation describes design problems with handling the
+ * common "short packet" transfer-termination policy; it couldn't be enabled
+ * by default even if the OUT-dma abort problems had a resolution.
+ */
+static unsigned use_dma = 1;
+
+#if 0
+//#include <linux/moduleparam.h>
+/* "modprobe goku_udc use_dma=1" etc
+ *	0 to disable dma
+ *	1 to use IN dma only (normal operation)
+ *	2 to use IN and OUT dma
+ */
+module_param(use_dma, uint, S_IRUGO);
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static void nuke(struct goku_ep *, int status);
+
+static inline void
+command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
+{
+	writel(COMMAND_EP(epnum) | command, &regs->Command);
+	udelay(300);
+}
+
+static int
+goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+	struct goku_udc	*dev;
+	struct goku_ep	*ep;
+	u32		mode;
+	u16		max;
+	unsigned long	flags;
+
+	ep = container_of(_ep, struct goku_ep, ep);
+	if (!_ep || !desc || ep->desc
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+	dev = ep->dev;
+	if (ep == &dev->ep[0])
+		return -EINVAL;
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+	if (ep->num != usb_endpoint_num(desc))
+		return -EINVAL;
+
+	switch (usb_endpoint_type(desc)) {
+	case USB_ENDPOINT_XFER_BULK:
+	case USB_ENDPOINT_XFER_INT:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
+			!= EPxSTATUS_EP_INVALID)
+		return -EBUSY;
+
+	/* enabling the no-toggle interrupt mode would need an api hook */
+	mode = 0;
+	max = get_unaligned_le16(&desc->wMaxPacketSize);
+	switch (max) {
+	case 64:	mode++;
+	case 32:	mode++;
+	case 16:	mode++;
+	case 8:		mode <<= 3;
+			break;
+	default:
+		return -EINVAL;
+	}
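+	/* the intentional fall-through above packs wMaxPacketSize into a
+	 * size code in bits 4:3 (8 -> 0x00, 16 -> 0x08, 32 -> 0x10,
+	 * 64 -> 0x18) before the type and direction bits are OR'd in below
+	 */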
+	mode |= 2 << 1;		/* bulk, or intr-with-toggle */
+
+	/* ep1/ep2 dma direction is chosen early; it works in the other
+	 * direction, with pio.  be cautious with out-dma.
+	 */
+	ep->is_in = usb_endpoint_dir_in(desc);
+	if (ep->is_in) {
+		mode |= 1;
+		ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
+	} else {
+		ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
+		if (ep->dma)
+			DBG(dev, "%s out-dma hides short packets\n",
+				ep->ep.name);
+	}
+
+	spin_lock_irqsave(&ep->dev->lock, flags);
+
+	/* ep1 and ep2 can do double buffering and/or dma */
+	if (ep->num < 3) {
+		struct goku_udc_regs __iomem	*regs = ep->dev->regs;
+		u32				tmp;
+
+		/* double buffer except (for now) with pio in */
+		tmp = ((ep->dma || !ep->is_in)
+				? 0x10	/* double buffered */
+				: 0x11	/* single buffer */
+			) << ep->num;
+		tmp |= readl(&regs->EPxSingle);
+		writel(tmp, &regs->EPxSingle);
+
+		tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
+		tmp |= readl(&regs->EPxBCS);
+		writel(tmp, &regs->EPxBCS);
+	}
+	writel(mode, ep->reg_mode);
+	command(ep->dev->regs, COMMAND_RESET, ep->num);
+	ep->ep.maxpacket = max;
+	ep->stopped = 0;
+	ep->desc = desc;
+	spin_unlock_irqrestore(&ep->dev->lock, flags);
+
+	DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
+		ep->is_in ? "IN" : "OUT",
+		ep->dma ? "dma" : "pio",
+		max);
+
+	return 0;
+}
+
+static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
+{
+	struct goku_udc		*dev = ep->dev;
+
+	if (regs) {
+		command(regs, COMMAND_INVALID, ep->num);
+		if (ep->num) {
+			if (ep->num == UDC_MSTWR_ENDPOINT)
+				dev->int_enable &= ~(INT_MSTWREND
+							|INT_MSTWRTMOUT);
+			else if (ep->num == UDC_MSTRD_ENDPOINT)
+				dev->int_enable &= ~INT_MSTRDEND;
+			dev->int_enable &= ~INT_EPxDATASET (ep->num);
+		} else
+			dev->int_enable &= ~INT_EP0;
+		writel(dev->int_enable, &regs->int_enable);
+		readl(&regs->int_enable);
+		if (ep->num < 3) {
+			struct goku_udc_regs __iomem	*r = ep->dev->regs;
+			u32				tmp;
+
+			tmp = readl(&r->EPxSingle);
+			tmp &= ~(0x11 << ep->num);
+			writel(tmp, &r->EPxSingle);
+
+			tmp = readl(&r->EPxBCS);
+			tmp &= ~(0x11 << ep->num);
+			writel(tmp, &r->EPxBCS);
+		}
+		/* reset dma in case we're still using it */
+		if (ep->dma) {
+			u32	master;
+
+			master = readl(&regs->dma_master) & MST_RW_BITS;
+			if (ep->num == UDC_MSTWR_ENDPOINT) {
+				master &= ~MST_W_BITS;
+				master |= MST_WR_RESET;
+			} else {
+				master &= ~MST_R_BITS;
+				master |= MST_RD_RESET;
+			}
+			writel(master, &regs->dma_master);
+		}
+	}
+
+	ep->ep.maxpacket = MAX_FIFO_SIZE;
+	ep->desc = NULL;
+	ep->ep.desc = NULL;
+	ep->stopped = 1;
+	ep->irqs = 0;
+	ep->dma = 0;
+}
+
+static int goku_ep_disable(struct usb_ep *_ep)
+{
+	struct goku_ep	*ep;
+	struct goku_udc	*dev;
+	unsigned long	flags;
+
+	ep = container_of(_ep, struct goku_ep, ep);
+	if (!_ep || !ep->desc)
+		return -ENODEV;
+	dev = ep->dev;
+	if (dev->ep0state == EP0_SUSPEND)
+		return -EBUSY;
+
+	VDBG(dev, "disable %s\n", _ep->name);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	nuke(ep, -ESHUTDOWN);
+	ep_reset(dev->regs, ep);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *
+goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	struct goku_request	*req;
+
+	if (!_ep)
+		return NULL;
+	req = kzalloc(sizeof *req, gfp_flags);
+	if (!req)
+		return NULL;
+
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD(&req->queue);
+	return &req->req;
+}
+
+static void
+goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct goku_request	*req;
+
+	if (!_ep || !_req)
+		return;
+
+	req = container_of(_req, struct goku_request, req);
+	WARN_ON(!list_empty(&req->queue));
+	kfree(req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+done(struct goku_ep *ep, struct goku_request *req, int status)
+{
+	struct goku_udc		*dev;
+	unsigned		stopped = ep->stopped;
+
+	list_del_init(&req->queue);
+
+	if (likely(req->req.status == -EINPROGRESS))
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	dev = ep->dev;
+
+	if (ep->dma)
+		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
+
+#ifndef USB_TRACE
+	if (status && status != -ESHUTDOWN)
+#endif
+		VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+	spin_unlock(&dev->lock);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&dev->lock);
+	ep->stopped = stopped;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline int
+write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
+{
+	unsigned	length, count;
+
+	length = min(req->req.length - req->req.actual, max);
+	req->req.actual += length;
+
+	count = length;
+	while (likely(count--))
+		writel(*buf++, fifo);
+	return length;
+}
+
+// return:  0 = still running, 1 = completed, negative = errno
+static int write_fifo(struct goku_ep *ep, struct goku_request *req)
+{
+	struct goku_udc	*dev = ep->dev;
+	u32		tmp;
+	u8		*buf;
+	unsigned	count;
+	int		is_last;
+
+	tmp = readl(&dev->regs->DataSet);
+	buf = req->req.buf + req->req.actual;
+	prefetch(buf);
+
+	dev = ep->dev;
+	if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
+		return -EL2HLT;
+
+	/* NOTE:  just single-buffered PIO-IN for now.  */
+	if (unlikely((tmp & DATASET_A(ep->num)) != 0))
+		return 0;
+
+	/* clear our "packet available" irq */
+	if (ep->num != 0)
+		writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
+
+	count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
+
+	/* last packet often short (sometimes a zlp, especially on ep0) */
+	if (unlikely(count != ep->ep.maxpacket)) {
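+		/* writing the complement of the endpoint's bit to the EOP
+		 * register apparently tells the chip to close out the short
+		 * packet; for ep0 this also hands control to the status stage
+		 */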
+		writel(~(1<<ep->num), &dev->regs->EOP);
+		if (ep->num == 0) {
+			dev->ep[0].stopped = 1;
+			dev->ep0state = EP0_STATUS;
+		}
+		is_last = 1;
+	} else {
+		if (likely(req->req.length != req->req.actual)
+				|| req->req.zero)
+			is_last = 0;
+		else
+			is_last = 1;
+	}
+#if 0		/* printk seemed to trash is_last...*/
+//#ifdef USB_TRACE
+	VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
+		ep->ep.name, count, is_last ? "/last" : "",
+		req->req.length - req->req.actual, req);
+#endif
+
+	/* requests complete when all IN data is in the FIFO,
+	 * or sometimes later, if a zlp was needed.
+	 */
+	if (is_last) {
+		done(ep, req, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int read_fifo(struct goku_ep *ep, struct goku_request *req)
+{
+	struct goku_udc_regs __iomem	*regs;
+	u32				size, set;
+	u8				*buf;
+	unsigned			bufferspace, is_short, dbuff;
+
+	regs = ep->dev->regs;
+top:
+	buf = req->req.buf + req->req.actual;
+	prefetchw(buf);
+
+	if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
+		return -EL2HLT;
+
+	dbuff = (ep->num == 1 || ep->num == 2);
+	do {
+		/* ack dataset irq matching the status we'll handle */
+		if (ep->num != 0)
+			writel(~INT_EPxDATASET(ep->num), &regs->int_status);
+
+		set = readl(&regs->DataSet) & DATASET_AB(ep->num);
+		size = readl(&regs->EPxSizeLA[ep->num]);
+		bufferspace = req->req.length - req->req.actual;
+
+		/* usually do nothing without an OUT packet */
+		if (likely(ep->num != 0 || bufferspace != 0)) {
+			if (unlikely(set == 0))
+				break;
+			/* use ep1/ep2 double-buffering for OUT */
+			if (!(size & PACKET_ACTIVE))
+				size = readl(&regs->EPxSizeLB[ep->num]);
+			if (!(size & PACKET_ACTIVE))	/* "can't happen" */
+				break;
+			size &= DATASIZE;	/* EPxSizeH == 0 */
+
+		/* ep0out no-out-data case for set_config, etc */
+		} else
+			size = 0;
+
+		/* read all bytes from this packet */
+		req->req.actual += size;
+		is_short = (size < ep->ep.maxpacket);
+#ifdef USB_TRACE
+		VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
+			ep->ep.name, size, is_short ? "/S" : "",
+			req, req->req.actual, req->req.length);
+#endif
+		while (likely(size-- != 0)) {
+			u8	byte = (u8) readl(ep->reg_fifo);
+
+			if (unlikely(bufferspace == 0)) {
+				/* this happens when the driver's buffer
+				 * is smaller than what the host sent.
+				 * discard the extra data in this packet.
+				 */
+				if (req->req.status != -EOVERFLOW)
+					DBG(ep->dev, "%s overflow %u\n",
+						ep->ep.name, size);
+				req->req.status = -EOVERFLOW;
+			} else {
+				*buf++ = byte;
+				bufferspace--;
+			}
+		}
+
+		/* completion */
+		if (unlikely(is_short || req->req.actual == req->req.length)) {
+			if (unlikely(ep->num == 0)) {
+				/* non-control endpoints now usable? */
+				if (ep->dev->req_config)
+					writel(ep->dev->configured
+							? USBSTATE_CONFIGURED
+							: 0,
+						&regs->UsbState);
+				/* ep0out status stage */
+				writel(~(1<<0), &regs->EOP);
+				ep->stopped = 1;
+				ep->dev->ep0state = EP0_STATUS;
+			}
+			done(ep, req, 0);
+
+			/* empty the second buffer asap */
+			if (dbuff && !list_empty(&ep->queue)) {
+				req = list_entry(ep->queue.next,
+						struct goku_request, queue);
+				goto top;
+			}
+			return 1;
+		}
+	} while (dbuff);
+	return 0;
+}
+
+static inline void
+pio_irq_enable(struct goku_udc *dev,
+		struct goku_udc_regs __iomem *regs, int epnum)
+{
+	dev->int_enable |= INT_EPxDATASET (epnum);
+	writel(dev->int_enable, &regs->int_enable);
+	/* write may still be posted */
+}
+
+static inline void
+pio_irq_disable(struct goku_udc *dev,
+		struct goku_udc_regs __iomem *regs, int epnum)
+{
+	dev->int_enable &= ~INT_EPxDATASET (epnum);
+	writel(dev->int_enable, &regs->int_enable);
+	/* write may still be posted */
+}
+
+static inline void
+pio_advance(struct goku_ep *ep)
+{
+	struct goku_request	*req;
+
+	if (unlikely(list_empty (&ep->queue)))
+		return;
+	req = list_entry(ep->queue.next, struct goku_request, queue);
+	(ep->is_in ? write_fifo : read_fifo)(ep, req);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+// return:  0 = q running, 1 = q stopped, negative = errno
+static int start_dma(struct goku_ep *ep, struct goku_request *req)
+{
+	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
+	u32				master;
+	u32				start = req->req.dma;
+	u32				end = start + req->req.length - 1;
+
+	master = readl(&regs->dma_master) & MST_RW_BITS;
+
+	/* re-init the bits affecting IN dma; careful with zlps */
+	if (likely(ep->is_in)) {
+		if (unlikely(master & MST_RD_ENA)) {
+			DBG (ep->dev, "start, IN active dma %03x!!\n",
+				master);
+//			return -EL2HLT;
+		}
+		writel(end, &regs->in_dma_end);
+		writel(start, &regs->in_dma_start);
+
+		master &= ~MST_R_BITS;
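+		/* choose how end-of-packet is signalled for this IN transfer:
+		 * a zero-length request forces one immediately (MST_RD_EOPB),
+		 * a transfer ending on a short packet (or with req.zero set)
+		 * enables automatic EOPB, and a maxpacket-aligned transfer
+		 * disables it so no stray short packet is emitted
+		 */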
+		if (unlikely(req->req.length == 0))
+			master = MST_RD_ENA | MST_RD_EOPB;
+		else if ((req->req.length % ep->ep.maxpacket) != 0
+					|| req->req.zero)
+			master = MST_RD_ENA | MST_EOPB_ENA;
+		else
+			master = MST_RD_ENA | MST_EOPB_DIS;
+
+		ep->dev->int_enable |= INT_MSTRDEND;
+
+	/* Goku DMA-OUT merges short packets, which plays poorly with
+	 * protocols where short packets mark the transfer boundaries.
+	 * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
+	 * ending transfers after 3 SOFs; we don't turn it on.
+	 */
+	} else {
+		if (unlikely(master & MST_WR_ENA)) {
+			DBG (ep->dev, "start, OUT active dma %03x!!\n",
+				master);
+//			return -EL2HLT;
+		}
+		writel(end, &regs->out_dma_end);
+		writel(start, &regs->out_dma_start);
+
+		master &= ~MST_W_BITS;
+		master |= MST_WR_ENA | MST_TIMEOUT_DIS;
+
+		ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
+	}
+
+	writel(master, &regs->dma_master);
+	writel(ep->dev->int_enable, &regs->int_enable);
+	return 0;
+}
+
+static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
+{
+	struct goku_request		*req;
+	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
+	u32				master;
+
+	master = readl(&regs->dma_master);
+
+	if (unlikely(list_empty(&ep->queue))) {
+stop:
+		if (ep->is_in)
+			dev->int_enable &= ~INT_MSTRDEND;
+		else
+			dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
+		writel(dev->int_enable, &regs->int_enable);
+		return;
+	}
+	req = list_entry(ep->queue.next, struct goku_request, queue);
+
+	/* normal hw dma completion (not abort) */
+	if (likely(ep->is_in)) {
+		if (unlikely(master & MST_RD_ENA))
+			return;
+		req->req.actual = readl(&regs->in_dma_current);
+	} else {
+		if (unlikely(master & MST_WR_ENA))
+			return;
+
+		/* hardware merges short packets, and also hides packet
+		 * overruns.  a partial packet MAY be in the fifo here.
+		 */
+		req->req.actual = readl(&regs->out_dma_current);
+	}
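+	/* the in/out_dma_current registers report the last address that was
+	 * transferred, so turn that into a byte count relative to the
+	 * request's dma address (hence the +1 below)
+	 */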
+	req->req.actual -= req->req.dma;
+	req->req.actual++;
+
+#ifdef USB_TRACE
+	VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
+		ep->ep.name, ep->is_in ? "IN" : "OUT",
+		req->req.actual, req->req.length, req);
+#endif
+	done(ep, req, 0);
+	if (list_empty(&ep->queue))
+		goto stop;
+	req = list_entry(ep->queue.next, struct goku_request, queue);
+	(void) start_dma(ep, req);
+}
+
+static void abort_dma(struct goku_ep *ep, int status)
+{
+	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
+	struct goku_request		*req;
+	u32				curr, master;
+
+	/* NAK future host requests, hoping the implicit delay lets the
+	 * dma engine finish reading (or writing) its latest packet and
+	 * empty the dma buffer (up to 16 bytes).
+	 *
+	 * This avoids needing to clean up a partial packet in the fifo;
+	 * we can't do that for IN without side effects to HALT and TOGGLE.
+	 */
+	command(regs, COMMAND_FIFO_DISABLE, ep->num);
+	req = list_entry(ep->queue.next, struct goku_request, queue);
+	master = readl(&regs->dma_master) & MST_RW_BITS;
+
+	/* FIXME using these resets isn't usably documented. this may
+	 * not work unless it's followed by disabling the endpoint.
+	 *
+	 * FIXME the OUT reset path doesn't even behave consistently.
+	 */
+	if (ep->is_in) {
+		if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
+			goto finished;
+		curr = readl(&regs->in_dma_current);
+
+		writel(curr, &regs->in_dma_end);
+		writel(curr, &regs->in_dma_start);
+
+		master &= ~MST_R_BITS;
+		master |= MST_RD_RESET;
+		writel(master, &regs->dma_master);
+
+		if (readl(&regs->dma_master) & MST_RD_ENA)
+			DBG(ep->dev, "IN dma active after reset!\n");
+
+	} else {
+		if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
+			goto finished;
+		curr = readl(&regs->out_dma_current);
+
+		writel(curr, &regs->out_dma_end);
+		writel(curr, &regs->out_dma_start);
+
+		master &= ~MST_W_BITS;
+		master |= MST_WR_RESET;
+		writel(master, &regs->dma_master);
+
+		if (readl(&regs->dma_master) & MST_WR_ENA)
+			DBG(ep->dev, "OUT dma active after reset!\n");
+	}
+	req->req.actual = (curr - req->req.dma) + 1;
+	req->req.status = status;
+
+	VDBG(ep->dev, "%s %s %s %d/%d\n", __func__, ep->ep.name,
+		ep->is_in ? "IN" : "OUT",
+		req->req.actual, req->req.length);
+
+	command(regs, COMMAND_FIFO_ENABLE, ep->num);
+
+	return;
+
+finished:
+	/* dma already completed; no abort needed */
+	command(regs, COMMAND_FIFO_ENABLE, ep->num);
+	req->req.actual = req->req.length;
+	req->req.status = 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+	struct goku_request	*req;
+	struct goku_ep		*ep;
+	struct goku_udc		*dev;
+	unsigned long		flags;
+	int			status;
+
+	/* always require a cpu-view buffer so pio works */
+	req = container_of(_req, struct goku_request, req);
+	if (unlikely(!_req || !_req->complete
+			|| !_req->buf || !list_empty(&req->queue)))
+		return -EINVAL;
+	ep = container_of(_ep, struct goku_ep, ep);
+	if (unlikely(!_ep || (!ep->desc && ep->num != 0)))
+		return -EINVAL;
+	dev = ep->dev;
+	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+
+	/* can't touch registers when suspended */
+	if (dev->ep0state == EP0_SUSPEND)
+		return -EBUSY;
+
+	/* set up dma mapping in case the caller didn't */
+	if (ep->dma) {
+		status = usb_gadget_map_request(&dev->gadget, &req->req,
+				ep->is_in);
+		if (status)
+			return status;
+	}
+
+#ifdef USB_TRACE
+	VDBG(dev, "%s queue req %p, len %u buf %p\n",
+			_ep->name, _req, _req->length, _req->buf);
+#endif
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	/* for ep0 IN without premature status, zlp is required and
+	 * writing EOP starts the status stage (OUT).
+	 */
+	if (unlikely(ep->num == 0 && ep->is_in))
+		_req->zero = 1;
+
+	/* kickstart this i/o queue? */
+	status = 0;
+	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
+		/* dma:  done after dma completion IRQ (or error)
+		 * pio:  done after last fifo operation
+		 */
+		if (ep->dma)
+			status = start_dma(ep, req);
+		else
+			status = (ep->is_in ? write_fifo : read_fifo)(ep, req);
+
+		if (unlikely(status != 0)) {
+			if (status > 0)
+				status = 0;
+			req = NULL;
+		}
+
+	} /* else pio or dma irq handler advances the queue. */
+
+	if (likely(req != 0))
+		list_add_tail(&req->queue, &ep->queue);
+
+	if (likely(!list_empty(&ep->queue))
+			&& likely(ep->num != 0)
+			&& !ep->dma
+			&& !(dev->int_enable & INT_EPxDATASET (ep->num)))
+		pio_irq_enable(dev, dev->regs, ep->num);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* pci writes may still be posted */
+	return status;
+}
+
+/* dequeue ALL requests */
+static void nuke(struct goku_ep *ep, int status)
+{
+	struct goku_request	*req;
+
+	ep->stopped = 1;
+	if (list_empty(&ep->queue))
+		return;
+	if (ep->dma)
+		abort_dma(ep, status);
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct goku_request, queue);
+		done(ep, req, status);
+	}
+}
+
+/* dequeue JUST ONE request */
+static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct goku_request	*req;
+	struct goku_ep		*ep;
+	struct goku_udc		*dev;
+	unsigned long		flags;
+
+	ep = container_of(_ep, struct goku_ep, ep);
+	if (!_ep || !_req || (!ep->desc && ep->num != 0))
+		return -EINVAL;
+	dev = ep->dev;
+	if (!dev->driver)
+		return -ESHUTDOWN;
+
+	/* we can't touch (dma) registers when suspended */
+	if (dev->ep0state == EP0_SUSPEND)
+		return -EBUSY;
+
+	VDBG(dev, "%s %s %s %s %p\n", __func__, _ep->name,
+		ep->is_in ? "IN" : "OUT",
+		ep->dma ? "dma" : "pio",
+		_req);
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry (req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		spin_unlock_irqrestore (&dev->lock, flags);
+		return -EINVAL;
+	}
+
+	if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
+		abort_dma(ep, -ECONNRESET);
+		done(ep, req, -ECONNRESET);
+		dma_advance(dev, ep);
+	} else if (!list_empty(&req->queue))
+		done(ep, req, -ECONNRESET);
+	else
+		req = NULL;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return req ? 0 : -EOPNOTSUPP;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void goku_clear_halt(struct goku_ep *ep)
+{
+	// assert (ep->num !=0)
+	VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
+	command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
+	command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
+	if (ep->stopped) {
+		ep->stopped = 0;
+		if (ep->dma) {
+			struct goku_request	*req;
+
+			if (list_empty(&ep->queue))
+				return;
+			req = list_entry(ep->queue.next, struct goku_request,
+						queue);
+			(void) start_dma(ep, req);
+		} else
+			pio_advance(ep);
+	}
+}
+
+static int goku_set_halt(struct usb_ep *_ep, int value)
+{
+	struct goku_ep	*ep;
+	unsigned long	flags;
+	int		retval = 0;
+
+	if (!_ep)
+		return -ENODEV;
+	ep = container_of (_ep, struct goku_ep, ep);
+
+	if (ep->num == 0) {
+		if (value) {
+			ep->dev->ep0state = EP0_STALL;
+			ep->dev->ep[0].stopped = 1;
+		} else
+			return -EINVAL;
+
+	/* don't change EPxSTATUS_EP_INVALID to READY */
+	} else if (!ep->desc) {
+		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ep->dev->lock, flags);
+	if (!list_empty(&ep->queue))
+		retval = -EAGAIN;
+	else if (ep->is_in && value
+			/* data in (either) packet buffer? */
+			&& (readl(&ep->dev->regs->DataSet)
+					& DATASET_AB(ep->num)))
+		retval = -EAGAIN;
+	else if (!value)
+		goku_clear_halt(ep);
+	else {
+		ep->stopped = 1;
+		VDBG(ep->dev, "%s set halt\n", ep->ep.name);
+		command(ep->dev->regs, COMMAND_STALL, ep->num);
+		readl(ep->reg_status);
+	}
+	spin_unlock_irqrestore(&ep->dev->lock, flags);
+	return retval;
+}
+
+static int goku_fifo_status(struct usb_ep *_ep)
+{
+	struct goku_ep			*ep;
+	struct goku_udc_regs __iomem	*regs;
+	u32				size;
+
+	if (!_ep)
+		return -ENODEV;
+	ep = container_of(_ep, struct goku_ep, ep);
+
+	/* size is only reported sanely for OUT */
+	if (ep->is_in)
+		return -EOPNOTSUPP;
+
+	/* ignores 16-byte dma buffer; SizeH == 0 */
+	regs = ep->dev->regs;
+	size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
+	size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
+	VDBG(ep->dev, "%s %s %u\n", __func__, ep->ep.name, size);
+	return size;
+}
+
+static void goku_fifo_flush(struct usb_ep *_ep)
+{
+	struct goku_ep			*ep;
+	struct goku_udc_regs __iomem	*regs;
+	u32				size;
+
+	if (!_ep)
+		return;
+	ep = container_of(_ep, struct goku_ep, ep);
+	VDBG(ep->dev, "%s %s\n", __func__, ep->ep.name);
+
+	/* don't change EPxSTATUS_EP_INVALID to READY */
+	if (!ep->desc && ep->num != 0) {
+		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
+		return;
+	}
+
+	regs = ep->dev->regs;
+	size = readl(&regs->EPxSizeLA[ep->num]);
+	size &= DATASIZE;
+
+	/* Non-desirable behavior:  FIFO_CLEAR also clears the
+	 * endpoint halt feature.  For OUT, we _could_ just read
+	 * the bytes out (PIO, if !ep->dma); for IN, no choice.
+	 */
+	if (size)
+		command(regs, COMMAND_FIFO_CLEAR, ep->num);
+}
+
+static struct usb_ep_ops goku_ep_ops = {
+	.enable		= goku_ep_enable,
+	.disable	= goku_ep_disable,
+
+	.alloc_request	= goku_alloc_request,
+	.free_request	= goku_free_request,
+
+	.queue		= goku_queue,
+	.dequeue	= goku_dequeue,
+
+	.set_halt	= goku_set_halt,
+	.fifo_status	= goku_fifo_status,
+	.fifo_flush	= goku_fifo_flush,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int goku_get_frame(struct usb_gadget *_gadget)
+{
+	return -EOPNOTSUPP;
+}
+
+static int goku_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *));
+static int goku_stop(struct usb_gadget_driver *driver);
+
+static const struct usb_gadget_ops goku_ops = {
+	.get_frame	= goku_get_frame,
+	.start		= goku_start,
+	.stop		= goku_stop,
+	// no remote wakeup
+	// not selfpowered
+};
+
+/*-------------------------------------------------------------------------*/
+
+static inline char *dmastr(void)
+{
+	if (use_dma == 0)
+		return "(dma disabled)";
+	else if (use_dma == 2)
+		return "(dma IN and OUT)";
+	else
+		return "(dma IN)";
+}
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static const char proc_node_name [] = "driver/udc";
+
+#define FOURBITS "%s%s%s%s"
+#define EIGHTBITS FOURBITS FOURBITS
+
+static void
+dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
+{
+	int t;
+
+	/* int_status is the same format ... */
+	t = scnprintf(*next, *size,
+		"%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
+		label, mask,
+		(mask & INT_PWRDETECT) ? " power" : "",
+		(mask & INT_SYSERROR) ? " sys" : "",
+		(mask & INT_MSTRDEND) ? " in-dma" : "",
+		(mask & INT_MSTWRTMOUT) ? " wrtmo" : "",
+
+		(mask & INT_MSTWREND) ? " out-dma" : "",
+		(mask & INT_MSTWRSET) ? " wrset" : "",
+		(mask & INT_ERR) ? " err" : "",
+		(mask & INT_SOF) ? " sof" : "",
+
+		(mask & INT_EP3NAK) ? " ep3nak" : "",
+		(mask & INT_EP2NAK) ? " ep2nak" : "",
+		(mask & INT_EP1NAK) ? " ep1nak" : "",
+		(mask & INT_EP3DATASET) ? " ep3" : "",
+
+		(mask & INT_EP2DATASET) ? " ep2" : "",
+		(mask & INT_EP1DATASET) ? " ep1" : "",
+		(mask & INT_STATUSNAK) ? " ep0snak" : "",
+		(mask & INT_STATUS) ? " ep0status" : "",
+
+		(mask & INT_SETUP) ? " setup" : "",
+		(mask & INT_ENDPOINT0) ? " ep0" : "",
+		(mask & INT_USBRESET) ? " reset" : "",
+		(mask & INT_SUSPEND) ? " suspend" : "");
+	*size -= t;
+	*next += t;
+}
+
+
+static int
+udc_proc_read(char *buffer, char **start, off_t off, int count,
+		int *eof, void *_dev)
+{
+	char				*buf = buffer;
+	struct goku_udc			*dev = _dev;
+	struct goku_udc_regs __iomem	*regs = dev->regs;
+	char				*next = buf;
+	unsigned			size = count;
+	unsigned long			flags;
+	int				i, t, is_usb_connected;
+	u32				tmp;
+
+	if (off != 0)
+		return 0;
+
+	local_irq_save(flags);
+
+	/* basic device status */
+	tmp = readl(&regs->power_detect);
+	is_usb_connected = tmp & PW_DETECT;
+	t = scnprintf(next, size,
+		"%s - %s\n"
+		"%s version: %s %s\n"
+		"Gadget driver: %s\n"
+		"Host %s, %s\n"
+		"\n",
+		pci_name(dev->pdev), driver_desc,
+		driver_name, DRIVER_VERSION, dmastr(),
+		dev->driver ? dev->driver->driver.name : "(none)",
+		is_usb_connected
+			? ((tmp & PW_PULLUP) ? "full speed" : "powered")
+			: "disconnected",
+		({char *state;
+		switch(dev->ep0state){
+		case EP0_DISCONNECT:	state = "ep0_disconnect"; break;
+		case EP0_IDLE:		state = "ep0_idle"; break;
+		case EP0_IN:		state = "ep0_in"; break;
+		case EP0_OUT:		state = "ep0_out"; break;
+		case EP0_STATUS:	state = "ep0_status"; break;
+		case EP0_STALL:		state = "ep0_stall"; break;
+		case EP0_SUSPEND:	state = "ep0_suspend"; break;
+		default:		state = "ep0_?"; break;
+		} state; })
+		);
+	size -= t;
+	next += t;
+
+	dump_intmask("int_status", readl(&regs->int_status), &next, &size);
+	dump_intmask("int_enable", readl(&regs->int_enable), &next, &size);
+
+	if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
+		goto done;
+
+	/* registers for (active) device and ep0 */
+	t = scnprintf(next, size, "\nirqs %lu\ndataset %02x "
+			"single.bcs %02x.%02x state %x addr %u\n",
+			dev->irqs, readl(&regs->DataSet),
+			readl(&regs->EPxSingle), readl(&regs->EPxBCS),
+			readl(&regs->UsbState),
+			readl(&regs->address));
+	size -= t;
+	next += t;
+
+	tmp = readl(&regs->dma_master);
+	t = scnprintf(next, size,
+		"dma %03X =" EIGHTBITS "%s %s\n", tmp,
+		(tmp & MST_EOPB_DIS) ? " eopb-" : "",
+		(tmp & MST_EOPB_ENA) ? " eopb+" : "",
+		(tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
+		(tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",
+
+		(tmp & MST_RD_EOPB) ? " eopb" : "",
+		(tmp & MST_RD_RESET) ? " in_reset" : "",
+		(tmp & MST_WR_RESET) ? " out_reset" : "",
+		(tmp & MST_RD_ENA) ? " IN" : "",
+
+		(tmp & MST_WR_ENA) ? " OUT" : "",
+		(tmp & MST_CONNECTION)
+			? "ep1in/ep2out"
+			: "ep1out/ep2in");
+	size -= t;
+	next += t;
+
+	/* dump endpoint queues */
+	for (i = 0; i < 4; i++) {
+		struct goku_ep		*ep = &dev->ep [i];
+		struct goku_request	*req;
+
+		if (i && !ep->desc)
+			continue;
+
+		tmp = readl(ep->reg_status);
+		t = scnprintf(next, size,
+			"%s %s max %u %s, irqs %lu, "
+			"status %02x (%s) " FOURBITS "\n",
+			ep->ep.name,
+			ep->is_in ? "in" : "out",
+			ep->ep.maxpacket,
+			ep->dma ? "dma" : "pio",
+			ep->irqs,
+			tmp, ({ char *s;
+			switch (tmp & EPxSTATUS_EP_MASK) {
+			case EPxSTATUS_EP_READY:
+				s = "ready"; break;
+			case EPxSTATUS_EP_DATAIN:
+				s = "packet"; break;
+			case EPxSTATUS_EP_FULL:
+				s = "full"; break;
+			case EPxSTATUS_EP_TX_ERR:	// host will retry
+				s = "tx_err"; break;
+			case EPxSTATUS_EP_RX_ERR:
+				s = "rx_err"; break;
+			case EPxSTATUS_EP_BUSY:		/* ep0 only */
+				s = "busy"; break;
+			case EPxSTATUS_EP_STALL:
+				s = "stall"; break;
+			case EPxSTATUS_EP_INVALID:	// these "can't happen"
+				s = "invalid"; break;
+			default:
+				s = "?"; break;
+			}; s; }),
+			(tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
+			(tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
+			(tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
+			(tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""
+			);
+		if (t <= 0 || t > size)
+			goto done;
+		size -= t;
+		next += t;
+
+		if (list_empty(&ep->queue)) {
+			t = scnprintf(next, size, "\t(nothing queued)\n");
+			if (t <= 0 || t > size)
+				goto done;
+			size -= t;
+			next += t;
+			continue;
+		}
+		list_for_each_entry(req, &ep->queue, queue) {
+			if (ep->dma && req->queue.prev == &ep->queue) {
+				if (i == UDC_MSTRD_ENDPOINT)
+					tmp = readl(&regs->in_dma_current);
+				else
+					tmp = readl(&regs->out_dma_current);
+				tmp -= req->req.dma;
+				tmp++;
+			} else
+				tmp = req->req.actual;
+
+			t = scnprintf(next, size,
+				"\treq %p len %u/%u buf %p\n",
+				&req->req, tmp, req->req.length,
+				req->req.buf);
+			if (t <= 0 || t > size)
+				goto done;
+			size -= t;
+			next += t;
+		}
+	}
+
+done:
+	local_irq_restore(flags);
+	*eof = 1;
+	return count - size;
+}
+
+#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */
+
+/*-------------------------------------------------------------------------*/
+
+static void udc_reinit (struct goku_udc *dev)
+{
+	static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };
+
+	unsigned i;
+
+	INIT_LIST_HEAD (&dev->gadget.ep_list);
+	dev->gadget.ep0 = &dev->ep [0].ep;
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+	dev->ep0state = EP0_DISCONNECT;
+	dev->irqs = 0;
+
+	for (i = 0; i < 4; i++) {
+		struct goku_ep	*ep = &dev->ep[i];
+
+		ep->num = i;
+		ep->ep.name = names[i];
+		ep->reg_fifo = &dev->regs->ep_fifo [i];
+		ep->reg_status = &dev->regs->ep_status [i];
+		ep->reg_mode = &dev->regs->ep_mode[i];
+
+		ep->ep.ops = &goku_ep_ops;
+		list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
+		ep->dev = dev;
+		INIT_LIST_HEAD (&ep->queue);
+
+		ep_reset(NULL, ep);
+	}
+
+	dev->ep[0].reg_mode = NULL;
+	dev->ep[0].ep.maxpacket = MAX_EP0_SIZE;
+	list_del_init (&dev->ep[0].ep.ep_list);
+}
+
+static void udc_reset(struct goku_udc *dev)
+{
+	struct goku_udc_regs __iomem	*regs = dev->regs;
+
+	writel(0, &regs->power_detect);
+	writel(0, &regs->int_enable);
+	readl(&regs->int_enable);
+	dev->int_enable = 0;
+
+	/* deassert reset, leave USB D+ at hi-Z (no pullup)
+	 * don't let INT_PWRDETECT sequence begin
+	 */
+	udelay(250);
+	writel(PW_RESETB, &regs->power_detect);
+	readl(&regs->int_enable);
+}
+
+static void ep0_start(struct goku_udc *dev)
+{
+	struct goku_udc_regs __iomem	*regs = dev->regs;
+	unsigned			i;
+
+	VDBG(dev, "%s\n", __func__);
+
+	udc_reset(dev);
+	udc_reinit (dev);
+	//writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);
+
+	/* hw handles set_address, set_feature, get_status; maybe more */
+	writel(   G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
+		| G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
+		| G_REQMODE_GET_DESC
+		| G_REQMODE_CLEAR_FEAT
+		, &regs->reqmode);
+
+	for (i = 0; i < 4; i++)
+		dev->ep[i].irqs = 0;
+
+	/* can't modify descriptors after writing UsbReady */
+	for (i = 0; i < DESC_LEN; i++)
+		writel(0, &regs->descriptors[i]);
+	writel(0, &regs->UsbReady);
+
+	/* expect ep0 requests when the host drops reset */
+	writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
+	dev->int_enable = INT_DEVWIDE | INT_EP0;
+	writel(dev->int_enable, &dev->regs->int_enable);
+	readl(&regs->int_enable);
+	dev->gadget.speed = USB_SPEED_FULL;
+	dev->ep0state = EP0_IDLE;
+}
+
+static void udc_enable(struct goku_udc *dev)
+{
+	/* start enumeration now, or after power detect irq */
+	if (readl(&dev->regs->power_detect) & PW_DETECT)
+		ep0_start(dev);
+	else {
+		DBG(dev, "%s\n", __func__);
+		dev->int_enable = INT_PWRDETECT;
+		writel(dev->int_enable, &dev->regs->int_enable);
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* keeping it simple:
+ * - one bus driver, initted first;
+ * - one function driver, initted second
+ */
+
+static struct goku_udc	*the_controller;
+
+/* when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests.  then usb traffic follows until a
+ * disconnect is reported.  then a host may connect again, or
+ * the driver might get unbound.
+ */
+static int goku_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct goku_udc	*dev = the_controller;
+	int			retval;
+
+	if (!driver
+			|| driver->max_speed < USB_SPEED_FULL
+			|| !bind
+			|| !driver->disconnect
+			|| !driver->setup)
+		return -EINVAL;
+	if (!dev)
+		return -ENODEV;
+	if (dev->driver)
+		return -EBUSY;
+
+	/* hook up the driver */
+	driver->driver.bus = NULL;
+	dev->driver = driver;
+	dev->gadget.dev.driver = &driver->driver;
+	retval = bind(&dev->gadget);
+	if (retval) {
+		DBG(dev, "bind to driver %s --> error %d\n",
+				driver->driver.name, retval);
+		dev->driver = NULL;
+		dev->gadget.dev.driver = NULL;
+		return retval;
+	}
+
+	/* then enable host detection and ep0; and we're ready
+	 * for set_configuration as well as eventual disconnect.
+	 */
+	udc_enable(dev);
+
+	DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
+	return 0;
+}
+
+static void
+stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
+{
+	unsigned	i;
+
+	DBG (dev, "%s\n", __func__);
+
+	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+		driver = NULL;
+
+	/* disconnect gadget driver after quiescing hw and the driver */
+	udc_reset (dev);
+	for (i = 0; i < 4; i++)
+		nuke(&dev->ep [i], -ESHUTDOWN);
+	if (driver) {
+		spin_unlock(&dev->lock);
+		driver->disconnect(&dev->gadget);
+		spin_lock(&dev->lock);
+	}
+
+	if (dev->driver)
+		udc_enable(dev);
+}
+
+static int goku_stop(struct usb_gadget_driver *driver)
+{
+	struct goku_udc	*dev = the_controller;
+	unsigned long	flags;
+
+	if (!dev)
+		return -ENODEV;
+	if (!driver || driver != dev->driver || !driver->unbind)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->driver = NULL;
+	stop_activity(dev, driver);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	driver->unbind(&dev->gadget);
+	dev->gadget.dev.driver = NULL;
+
+	DBG(dev, "unregistered driver '%s'\n", driver->driver.name);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void ep0_setup(struct goku_udc *dev)
+{
+	struct goku_udc_regs __iomem	*regs = dev->regs;
+	struct usb_ctrlrequest		ctrl;
+	int				tmp;
+
+	/* read SETUP packet and enter DATA stage */
+	ctrl.bRequestType = readl(&regs->bRequestType);
+	ctrl.bRequest = readl(&regs->bRequest);
+	ctrl.wValue  = cpu_to_le16((readl(&regs->wValueH)  << 8)
+					| readl(&regs->wValueL));
+	ctrl.wIndex  = cpu_to_le16((readl(&regs->wIndexH)  << 8)
+					| readl(&regs->wIndexL));
+	ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
+					| readl(&regs->wLengthL));
+	writel(0, &regs->SetupRecv);
+
+	nuke(&dev->ep[0], 0);
+	dev->ep[0].stopped = 0;
+	if (likely(ctrl.bRequestType & USB_DIR_IN)) {
+		dev->ep[0].is_in = 1;
+		dev->ep0state = EP0_IN;
+		/* detect early status stages */
+		writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
+	} else {
+		dev->ep[0].is_in = 0;
+		dev->ep0state = EP0_OUT;
+
+		/* NOTE:  CLEAR_FEATURE is done in software so that we can
+		 * synchronize transfer restarts after bulk IN stalls.  data
+		 * won't even enter the fifo until the halt is cleared.
+		 */
+		switch (ctrl.bRequest) {
+		case USB_REQ_CLEAR_FEATURE:
+			switch (ctrl.bRequestType) {
+			case USB_RECIP_ENDPOINT:
+				tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
+				/* active endpoint */
+				if (tmp > 3 || (!dev->ep[tmp].desc && tmp != 0))
+					goto stall;
+				if (ctrl.wIndex & cpu_to_le16(
+						USB_DIR_IN)) {
+					if (!dev->ep[tmp].is_in)
+						goto stall;
+				} else {
+					if (dev->ep[tmp].is_in)
+						goto stall;
+				}
+				if (ctrl.wValue != cpu_to_le16(
+						USB_ENDPOINT_HALT))
+					goto stall;
+				if (tmp)
+					goku_clear_halt(&dev->ep[tmp]);
+succeed:
+				/* start ep0out status stage */
+				writel(~(1<<0), &regs->EOP);
+				dev->ep[0].stopped = 1;
+				dev->ep0state = EP0_STATUS;
+				return;
+			case USB_RECIP_DEVICE:
+				/* device remote wakeup: always clear */
+				if (ctrl.wValue != cpu_to_le16(1))
+					goto stall;
+				VDBG(dev, "clear dev remote wakeup\n");
+				goto succeed;
+			case USB_RECIP_INTERFACE:
+				goto stall;
+			default:		/* pass to gadget driver */
+				break;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+#ifdef USB_TRACE
+	VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+		ctrl.bRequestType, ctrl.bRequest,
+		le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
+		le16_to_cpu(ctrl.wLength));
+#endif
+
+	/* hw wants to know when we're configured (or not) */
+	dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
+				&& ctrl.bRequestType == USB_RECIP_DEVICE);
+	if (unlikely(dev->req_config))
+		dev->configured = (ctrl.wValue != cpu_to_le16(0));
+
+	/* delegate everything to the gadget driver.
+	 * it may respond after this irq handler returns.
+	 */
+	spin_unlock (&dev->lock);
+	tmp = dev->driver->setup(&dev->gadget, &ctrl);
+	spin_lock (&dev->lock);
+	if (unlikely(tmp < 0)) {
+stall:
+#ifdef USB_TRACE
+		VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
+				ctrl.bRequestType, ctrl.bRequest, tmp);
+#endif
+		command(regs, COMMAND_STALL, 0);
+		dev->ep[0].stopped = 1;
+		dev->ep0state = EP0_STALL;
+	}
+
+	/* expect at least one data or status stage irq */
+}
+
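+/* interrupt bits appear to be acknowledged by writing their complement to
+ * int_status (bits written as 0 are cleared); ACK() does that and also drops
+ * the bit from the cached status word
+ */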
+#define ACK(irqbit) { \
+		stat &= ~irqbit; \
+		writel(~irqbit, &regs->int_status); \
+		handled = 1; \
+		}
+
+static irqreturn_t goku_irq(int irq, void *_dev)
+{
+	struct goku_udc			*dev = _dev;
+	struct goku_udc_regs __iomem	*regs = dev->regs;
+	struct goku_ep			*ep;
+	u32				stat, handled = 0;
+	unsigned			i, rescans = 5;
+
+	spin_lock(&dev->lock);
+
+rescan:
+	stat = readl(&regs->int_status) & dev->int_enable;
+	if (!stat)
+		goto done;
+	dev->irqs++;
+
+	/* device-wide irqs */
+	if (unlikely(stat & INT_DEVWIDE)) {
+		if (stat & INT_SYSERROR) {
+			ERROR(dev, "system error\n");
+			stop_activity(dev, dev->driver);
+			stat = 0;
+			handled = 1;
+			// FIXME have a neater way to prevent re-enumeration
+			dev->driver = NULL;
+			goto done;
+		}
+		if (stat & INT_PWRDETECT) {
+			writel(~stat, &regs->int_status);
+			if (readl(&dev->regs->power_detect) & PW_DETECT) {
+				VDBG(dev, "connect\n");
+				ep0_start(dev);
+			} else {
+				DBG(dev, "disconnect\n");
+				if (dev->gadget.speed == USB_SPEED_FULL)
+					stop_activity(dev, dev->driver);
+				dev->ep0state = EP0_DISCONNECT;
+				dev->int_enable = INT_DEVWIDE;
+				writel(dev->int_enable, &dev->regs->int_enable);
+			}
+			stat = 0;
+			handled = 1;
+			goto done;
+		}
+		if (stat & INT_SUSPEND) {
+			ACK(INT_SUSPEND);
+			if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
+				switch (dev->ep0state) {
+				case EP0_DISCONNECT:
+				case EP0_SUSPEND:
+					goto pm_next;
+				default:
+					break;
+				}
+				DBG(dev, "USB suspend\n");
+				dev->ep0state = EP0_SUSPEND;
+				if (dev->gadget.speed != USB_SPEED_UNKNOWN
+						&& dev->driver
+						&& dev->driver->suspend) {
+					spin_unlock(&dev->lock);
+					dev->driver->suspend(&dev->gadget);
+					spin_lock(&dev->lock);
+				}
+			} else {
+				if (dev->ep0state != EP0_SUSPEND) {
+					DBG(dev, "bogus USB resume %d\n",
+						dev->ep0state);
+					goto pm_next;
+				}
+				DBG(dev, "USB resume\n");
+				dev->ep0state = EP0_IDLE;
+				if (dev->gadget.speed != USB_SPEED_UNKNOWN
+						&& dev->driver
+						&& dev->driver->resume) {
+					spin_unlock(&dev->lock);
+					dev->driver->resume(&dev->gadget);
+					spin_lock(&dev->lock);
+				}
+			}
+		}
+pm_next:
+		if (stat & INT_USBRESET) {		/* hub reset done */
+			ACK(INT_USBRESET);
+			INFO(dev, "USB reset done, gadget %s\n",
+				dev->driver->driver.name);
+		}
+		// and INT_ERR on some endpoint's crc/bitstuff/... problem
+	}
+
+	/* progress ep0 setup, data, or status stages.
+	 * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
+	 */
+	if (stat & INT_SETUP) {
+		ACK(INT_SETUP);
+		dev->ep[0].irqs++;
+		ep0_setup(dev);
+	}
+	if (stat & INT_STATUSNAK) {
+		ACK(INT_STATUSNAK|INT_ENDPOINT0);
+		if (dev->ep0state == EP0_IN) {
+			ep = &dev->ep[0];
+			ep->irqs++;
+			nuke(ep, 0);
+			writel(~(1<<0), &regs->EOP);
+			dev->ep0state = EP0_STATUS;
+		}
+	}
+	if (stat & INT_ENDPOINT0) {
+		ACK(INT_ENDPOINT0);
+		ep = &dev->ep[0];
+		ep->irqs++;
+		pio_advance(ep);
+	}
+
+	/* dma completion */
+	if (stat & INT_MSTRDEND) {	/* IN */
+		ACK(INT_MSTRDEND);
+		ep = &dev->ep[UDC_MSTRD_ENDPOINT];
+		ep->irqs++;
+		dma_advance(dev, ep);
+	}
+	if (stat & INT_MSTWREND) {	/* OUT */
+		ACK(INT_MSTWREND);
+		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
+		ep->irqs++;
+		dma_advance(dev, ep);
+	}
+	if (stat & INT_MSTWRTMOUT) {	/* OUT */
+		ACK(INT_MSTWRTMOUT);
+		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
+		ep->irqs++;
+		ERROR(dev, "%s write timeout?\n", ep->ep.name);
+		// reset dma? then dma_advance()
+	}
+
+	/* pio */
+	for (i = 1; i < 4; i++) {
+		u32		tmp = INT_EPxDATASET(i);
+
+		if (!(stat & tmp))
+			continue;
+		ep = &dev->ep[i];
+		pio_advance(ep);
+		if (list_empty (&ep->queue))
+			pio_irq_disable(dev, regs, i);
+		stat &= ~tmp;
+		handled = 1;
+		ep->irqs++;
+	}
+
+	if (rescans--)
+		goto rescan;
+
+done:
+	(void)readl(&regs->int_enable);
+	spin_unlock(&dev->lock);
+	if (stat)
+		DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
+				readl(&regs->int_status), dev->int_enable);
+	return IRQ_RETVAL(handled);
+}
+
+#undef ACK
+
+/*-------------------------------------------------------------------------*/
+
+static void gadget_release(struct device *_dev)
+{
+	struct goku_udc	*dev = dev_get_drvdata(_dev);
+
+	kfree(dev);
+}
+
+/* tear down the binding between this driver and the pci device */
+
+static void goku_remove(struct pci_dev *pdev)
+{
+	struct goku_udc		*dev = pci_get_drvdata(pdev);
+
+	DBG(dev, "%s\n", __func__);
+
+	usb_del_gadget_udc(&dev->gadget);
+
+	BUG_ON(dev->driver);
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	remove_proc_entry(proc_node_name, NULL);
+#endif
+	if (dev->regs)
+		udc_reset(dev);
+	if (dev->got_irq)
+		free_irq(pdev->irq, dev);
+	if (dev->regs)
+		iounmap(dev->regs);
+	if (dev->got_region)
+		release_mem_region(pci_resource_start (pdev, 0),
+				pci_resource_len (pdev, 0));
+	if (dev->enabled)
+		pci_disable_device(pdev);
+	if (dev->registered)
+		device_unregister(&dev->gadget.dev);
+
+	pci_set_drvdata(pdev, NULL);
+	dev->regs = NULL;
+	the_controller = NULL;
+
+	INFO(dev, "unbind\n");
+}
+
+/* wrap this driver around the specified pci device, but
+ * don't respond over USB until a gadget driver binds to us.
+ */
+
+static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct goku_udc		*dev = NULL;
+	unsigned long		resource, len;
+	void __iomem		*base = NULL;
+	int			retval;
+
+	/* if you want to support more than one controller in a system,
+	 * usb_gadget_driver_{register,unregister}() must change.
+	 */
+	if (the_controller) {
+		pr_warning("ignoring %s\n", pci_name(pdev));
+		return -EBUSY;
+	}
+	if (!pdev->irq) {
+		printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
+		retval = -ENODEV;
+		goto err;
+	}
+
+	/* alloc, and start init */
+	dev = kzalloc (sizeof *dev, GFP_KERNEL);
+	if (dev == NULL){
+		pr_debug("enomem %s\n", pci_name(pdev));
+		retval = -ENOMEM;
+		goto err;
+	}
+
+	spin_lock_init(&dev->lock);
+	dev->pdev = pdev;
+	dev->gadget.ops = &goku_ops;
+	dev->gadget.max_speed = USB_SPEED_FULL;
+
+	/* the "gadget" abstracts/virtualizes the controller */
+	dev_set_name(&dev->gadget.dev, "gadget");
+	dev->gadget.dev.parent = &pdev->dev;
+	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+	dev->gadget.dev.release = gadget_release;
+	dev->gadget.name = driver_name;
+
+	/* now all the pci goodies ... */
+	retval = pci_enable_device(pdev);
+	if (retval < 0) {
+		DBG(dev, "can't enable, %d\n", retval);
+		goto err;
+	}
+	dev->enabled = 1;
+
+	resource = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!request_mem_region(resource, len, driver_name)) {
+		DBG(dev, "controller already in use\n");
+		retval = -EBUSY;
+		goto err;
+	}
+	dev->got_region = 1;
+
+	base = ioremap_nocache(resource, len);
+	if (base == NULL) {
+		DBG(dev, "can't map memory\n");
+		retval = -EFAULT;
+		goto err;
+	}
+	dev->regs = (struct goku_udc_regs __iomem *) base;
+
+	pci_set_drvdata(pdev, dev);
+	INFO(dev, "%s\n", driver_desc);
+	INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
+	INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
+
+	/* init to known state, then setup irqs */
+	udc_reset(dev);
+	udc_reinit (dev);
+	if (request_irq(pdev->irq, goku_irq, IRQF_SHARED/*|IRQF_SAMPLE_RANDOM*/,
+			driver_name, dev) != 0) {
+		DBG(dev, "request interrupt %d failed\n", pdev->irq);
+		retval = -EBUSY;
+		goto err;
+	}
+	dev->got_irq = 1;
+	if (use_dma)
+		pci_set_master(pdev);
+
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
+#endif
+
+	the_controller = dev;
+	retval = device_register(&dev->gadget.dev);
+	if (retval) {
+		put_device(&dev->gadget.dev);
+		goto err;
+	}
+	dev->registered = 1;
+	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
+	if (retval)
+		goto err;
+
+	return 0;
+
+err:
+	if (dev)
+		goku_remove (pdev);
+	return retval;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
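+/* The match entry below pairs the exact Toshiba vendor/device IDs with the
+ * class value (PCI_CLASS_SERIAL_USB << 8) | 0xfe; with a full class_mask this
+ * selects a serial-bus/USB function whose programming interface is 0xfe,
+ * which the PCI class-code tables reserve for a USB controller operating in
+ * device (peripheral) mode rather than as a host controller.
+ */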
+static const struct pci_device_id pci_ids[] = { {
+	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+	.class_mask =	~0,
+	.vendor =	0x102f,		/* Toshiba */
+	.device =	0x0107,		/* this UDC */
+	.subvendor =	PCI_ANY_ID,
+	.subdevice =	PCI_ANY_ID,
+
+}, { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE (pci, pci_ids);
+
+static struct pci_driver goku_pci_driver = {
+	.name =		(char *) driver_name,
+	.id_table =	pci_ids,
+
+	.probe =	goku_probe,
+	.remove =	goku_remove,
+
+	/* FIXME add power management support */
+};
+
+static int __init init (void)
+{
+	return pci_register_driver (&goku_pci_driver);
+}
+module_init (init);
+
+static void __exit cleanup (void)
+{
+	pci_unregister_driver (&goku_pci_driver);
+}
+module_exit (cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/goku_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/goku_udc.h
new file mode 100644
index 0000000..e7e0c69
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/goku_udc.h
@@ -0,0 +1,293 @@
+/*
+ * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
+ *
+ * Copyright (C) 2000-2002 Lineo
+ *      by Stuart Lynne, Tom Rushworth, and Bruce Balden
+ * Copyright (C) 2002 Toshiba Corporation
+ * Copyright (C) 2003 MontaVista Software (source@mvista.com)
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/*
+ * PCI BAR 0 points to these registers.
+ */
+struct goku_udc_regs {
+	/* irq management */
+	u32	int_status;		/* 0x000 */
+	u32	int_enable;
+#define INT_SUSPEND		0x00001		/* or resume */
+#define INT_USBRESET		0x00002
+#define INT_ENDPOINT0		0x00004
+#define INT_SETUP		0x00008
+#define INT_STATUS		0x00010
+#define INT_STATUSNAK		0x00020
+#define INT_EPxDATASET(n)	(0x00020 << (n))	/* 0 < n < 4 */
+#	define INT_EP1DATASET		0x00040
+#	define INT_EP2DATASET		0x00080
+#	define INT_EP3DATASET		0x00100
+#define INT_EPnNAK(n)		(0x00100 << (n))	/* 0 < n < 4 */
+#	define INT_EP1NAK		0x00200
+#	define INT_EP2NAK		0x00400
+#	define INT_EP3NAK		0x00800
+#define INT_SOF			0x01000
+#define INT_ERR			0x02000
+#define INT_MSTWRSET		0x04000
+#define INT_MSTWREND		0x08000
+#define INT_MSTWRTMOUT		0x10000
+#define INT_MSTRDEND		0x20000
+#define INT_SYSERROR		0x40000
+#define INT_PWRDETECT		0x80000
+
+#define	INT_DEVWIDE \
+	(INT_PWRDETECT|INT_SYSERROR/*|INT_ERR*/|INT_USBRESET|INT_SUSPEND)
+#define	INT_EP0 \
+	(INT_SETUP|INT_ENDPOINT0/*|INT_STATUS*/|INT_STATUSNAK)
+
+	u32	dma_master;
+#define MST_EOPB_DIS		0x0800
+#define MST_EOPB_ENA		0x0400
+#define MST_TIMEOUT_DIS		0x0200
+#define MST_TIMEOUT_ENA		0x0100
+#define MST_RD_EOPB		0x0080		/* write-only */
+#define MST_RD_RESET		0x0040
+#define MST_WR_RESET		0x0020
+#define MST_RD_ENA		0x0004		/* 1:start, 0:ignore */
+#define MST_WR_ENA		0x0002		/* 1:start, 0:ignore */
+#define MST_CONNECTION		0x0001		/* 0 for ep1out/ep2in */
+
+#define MST_R_BITS		(MST_EOPB_DIS|MST_EOPB_ENA \
+					|MST_RD_ENA|MST_RD_RESET)
+#define MST_W_BITS		(MST_TIMEOUT_DIS|MST_TIMEOUT_ENA \
+					|MST_WR_ENA|MST_WR_RESET)
+#define MST_RW_BITS		(MST_R_BITS|MST_W_BITS \
+					|MST_CONNECTION)
+
+/* these values assume (dma_master & MST_CONNECTION) == 0 */
+#define UDC_MSTWR_ENDPOINT        1
+#define UDC_MSTRD_ENDPOINT        2
+
+	/* dma master write */
+	u32	out_dma_start;
+	u32	out_dma_end;
+	u32	out_dma_current;
+
+	/* dma master read */
+	u32	in_dma_start;
+	u32	in_dma_end;
+	u32	in_dma_current;
+
+	u32	power_detect;
+#define PW_DETECT		0x04
+#define PW_RESETB		0x02
+#define PW_PULLUP		0x01
+
+	u8	_reserved0 [0x1d8];
+
+	/* endpoint registers */
+	u32	ep_fifo [4];		/* 0x200 */
+	u8	_reserved1 [0x10];
+	u32	ep_mode [4];		/* only 1-3 valid */
+	u8	_reserved2 [0x10];
+
+	u32	ep_status [4];
+#define EPxSTATUS_TOGGLE	0x40
+#define EPxSTATUS_SUSPEND	0x20
+#define EPxSTATUS_EP_MASK	(0x07<<2)
+#	define EPxSTATUS_EP_READY	(0<<2)
+#	define EPxSTATUS_EP_DATAIN	(1<<2)
+#	define EPxSTATUS_EP_FULL	(2<<2)
+#	define EPxSTATUS_EP_TX_ERR	(3<<2)
+#	define EPxSTATUS_EP_RX_ERR	(4<<2)
+#	define EPxSTATUS_EP_BUSY	(5<<2)
+#	define EPxSTATUS_EP_STALL	(6<<2)
+#	define EPxSTATUS_EP_INVALID	(7<<2)
+#define EPxSTATUS_FIFO_DISABLE	0x02
+#define EPxSTATUS_STAGE_ERROR	0x01
+
+	u8	_reserved3 [0x10];
+	u32	EPxSizeLA[4];
+#define PACKET_ACTIVE		(1<<7)
+#define DATASIZE		0x7f
+	u8	_reserved3a [0x10];
+	u32	EPxSizeLB[4];		/* only 1,2 valid */
+	u8	_reserved3b [0x10];
+	u32	EPxSizeHA[4];		/* only 1-3 valid */
+	u8	_reserved3c [0x10];
+	u32	EPxSizeHB[4];		/* only 1,2 valid */
+	u8	_reserved4[0x30];
+
+	/* SETUP packet contents */
+	u32	bRequestType;		/* 0x300 */
+	u32	bRequest;
+	u32	wValueL;
+	u32	wValueH;
+	u32	wIndexL;
+	u32	wIndexH;
+	u32	wLengthL;
+	u32	wLengthH;
+
+	/* command interaction/handshaking */
+	u32	SetupRecv;		/* 0x320 */
+	u32	CurrConfig;
+	u32	StdRequest;
+	u32	Request;
+	u32	DataSet;
+#define DATASET_A(epnum)	(1<<(2*(epnum)))
+#define DATASET_B(epnum)	(2<<(2*(epnum)))
+#define DATASET_AB(epnum)	(3<<(2*(epnum)))
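+/* For example, DATASET_A(1) = 0x04, DATASET_B(1) = 0x08 and DATASET_AB(1) =
+ * 0x0c, so the DataSet register apparently tracks buffers A and B per
+ * endpoint in two-bit fields, one field per endpoint number.
+ */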
+	u8	_reserved5[4];
+
+	u32	UsbState;
+#define USBSTATE_CONFIGURED	0x04
+#define USBSTATE_ADDRESSED	0x02
+#define USBSTATE_DEFAULT	0x01
+
+	u32	EOP;
+
+	u32	Command;		/* 0x340 */
+#define COMMAND_SETDATA0	2
+#define COMMAND_RESET		3
+#define COMMAND_STALL		4
+#define COMMAND_INVALID		5
+#define COMMAND_FIFO_DISABLE	7
+#define COMMAND_FIFO_ENABLE	8
+#define COMMAND_INIT_DESCRIPTOR	9
+#define COMMAND_FIFO_CLEAR	10	/* also stall */
+#define COMMAND_STALL_CLEAR	11
+#define COMMAND_EP(n)		((n) << 4)
+
+	u32	EPxSingle;
+	u8	_reserved6[4];
+	u32	EPxBCS;
+	u8	_reserved7[8];
+	u32	IntControl;
+#define ICONTROL_STATUSNAK	1
+	u8	_reserved8[4];
+
+	u32	reqmode;	// 0x360 standard request mode, low 8 bits
+#define G_REQMODE_SET_INTF	(1<<7)
+#define G_REQMODE_GET_INTF	(1<<6)
+#define G_REQMODE_SET_CONF	(1<<5)
+#define G_REQMODE_GET_CONF	(1<<4)
+#define G_REQMODE_GET_DESC	(1<<3)
+#define G_REQMODE_SET_FEAT	(1<<2)
+#define G_REQMODE_CLEAR_FEAT	(1<<1)
+#define G_REQMODE_GET_STATUS	(1<<0)
+
+	u32	ReqMode;
+	u8	_reserved9[0x18];
+	u32	PortStatus;		/* 0x380 */
+	u8	_reserved10[8];
+	u32	address;
+	u32	buff_test;
+	u8	_reserved11[4];
+	u32	UsbReady;
+	u8	_reserved12[4];
+	u32	SetDescStall;		/* 0x3a0 */
+	u8	_reserved13[0x45c];
+
+	/* hardware could handle limited GET_DESCRIPTOR duties */
+#define	DESC_LEN	0x80
+	u32	descriptors[DESC_LEN];	/* 0x800 */
+	u8	_reserved14[0x600];
+
+} __attribute__ ((packed));
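+/*
+ * The _reservedN padding above is sized so that the offsets noted in the
+ * field comments line up.  A compile-time sanity check along these lines
+ * (a sketch, not part of the original driver) would confirm the layout:
+ *
+ *	BUILD_BUG_ON(offsetof(struct goku_udc_regs, ep_fifo)      != 0x200);
+ *	BUILD_BUG_ON(offsetof(struct goku_udc_regs, bRequestType) != 0x300);
+ *	BUILD_BUG_ON(offsetof(struct goku_udc_regs, Command)      != 0x340);
+ *	BUILD_BUG_ON(offsetof(struct goku_udc_regs, descriptors)  != 0x800);
+ */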
+
+#define	MAX_FIFO_SIZE	64
+#define	MAX_EP0_SIZE	8		/* ep0 fifo is bigger, though */
+
+
+/*-------------------------------------------------------------------------*/
+
+/* DRIVER DATA STRUCTURES and UTILITIES */
+
+struct goku_ep {
+	struct usb_ep				ep;
+	struct goku_udc				*dev;
+	unsigned long				irqs;
+
+	unsigned				num:8,
+						dma:1,
+						is_in:1,
+						stopped:1;
+
+	/* analogous to a host-side qh */
+	struct list_head			queue;
+	const struct usb_endpoint_descriptor	*desc;
+
+	u32 __iomem				*reg_fifo;
+	u32 __iomem				*reg_mode;
+	u32 __iomem				*reg_status;
+};
+
+struct goku_request {
+	struct usb_request		req;
+	struct list_head		queue;
+
+	unsigned			mapped:1;
+};
+
+enum ep0state {
+	EP0_DISCONNECT,		/* no host */
+	EP0_IDLE,		/* between STATUS ack and SETUP report */
+	EP0_IN, EP0_OUT,	/* data stage */
+	EP0_STATUS,		/* status stage */
+	EP0_STALL,		/* data or status stages */
+	EP0_SUSPEND,		/* usb suspend */
+};
+
+struct goku_udc {
+	/* each pci device provides one gadget, several endpoints */
+	struct usb_gadget		gadget;
+	spinlock_t			lock;
+	struct goku_ep			ep[4];
+	struct usb_gadget_driver	*driver;
+
+	enum ep0state			ep0state;
+	unsigned			got_irq:1,
+					got_region:1,
+					req_config:1,
+					configured:1,
+					enabled:1,
+					registered:1;
+
+	/* pci state used to access those endpoints */
+	struct pci_dev			*pdev;
+	struct goku_udc_regs __iomem	*regs;
+	u32				int_enable;
+
+	/* statistics... */
+	unsigned long			irqs;
+};
+
+/*-------------------------------------------------------------------------*/
+
+#define xprintk(dev,level,fmt,args...) \
+	printk(level "%s %s: " fmt , driver_name , \
+			pci_name(dev->pdev) , ## args)
+
+#ifdef DEBUG
+#define DBG(dev,fmt,args...) \
+	xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DBG(dev,fmt,args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE
+#define VDBG DBG
+#else
+#define VDBG(dev,fmt,args...) \
+	do { } while (0)
+#endif	/* VERBOSE */
+
+#define ERROR(dev,fmt,args...) \
+	xprintk(dev , KERN_ERR , fmt , ## args)
+#define WARNING(dev,fmt,args...) \
+	xprintk(dev , KERN_WARNING , fmt , ## args)
+#define INFO(dev,fmt,args...) \
+	xprintk(dev , KERN_INFO , fmt , ## args)
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/hid.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/hid.c
new file mode 100644
index 0000000..3493adf
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/hid.c
@@ -0,0 +1,288 @@
+/*
+ * hid.c -- HID Composite driver
+ *
+ * Based on multi.c
+ *
+ * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+
+#define DRIVER_DESC		"HID Gadget"
+#define DRIVER_VERSION		"2010/03/16"
+
+/*-------------------------------------------------------------------------*/
+
+#define HIDG_VENDOR_NUM		0x0525	/* XXX NetChip */
+#define HIDG_PRODUCT_NUM	0xa4ac	/* Linux-USB HID gadget */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "f_hid.c"
+
+
+struct hidg_func_node {
+	struct list_head node;
+	struct hidg_func_descriptor *func;
+};
+
+static LIST_HEAD(hidg_func_list);
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		cpu_to_le16(0x0200),
+
+	/* .bDeviceClass =		USB_CLASS_COMM, */
+	/* .bDeviceSubClass =	0, */
+	/* .bDeviceProtocol =	0, */
+	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
+	.bDeviceSubClass =	0,
+	.bDeviceProtocol =	0,
+	/* .bMaxPacketSize0 = f(hardware) */
+
+	/* Vendor and product id can be overridden by module parameters.  */
+	.idVendor =		cpu_to_le16(HIDG_VENDOR_NUM),
+	.idProduct =		cpu_to_le16(HIDG_PRODUCT_NUM),
+	/* .bcdDevice = f(hardware) */
+	/* .iManufacturer = DYNAMIC */
+	/* .iProduct = DYNAMIC */
+	/* NO SERIAL NUMBER */
+	.bNumConfigurations =	1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/* REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer,
+	[STRING_PRODUCT_IDX].s = DRIVER_DESC,
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+
+
+/****************************** Configurations ******************************/
+
+static int __init do_config(struct usb_configuration *c)
+{
+	struct hidg_func_node *e;
+	int func = 0, status = 0;
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	list_for_each_entry(e, &hidg_func_list, node) {
+		status = hidg_bind_config(c, e->func, func++);
+		if (status)
+			break;
+	}
+
+	return status;
+}
+
+static struct usb_configuration config_driver = {
+	.label			= "HID Gadget",
+	.bConfigurationValue	= 1,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+};
+
+/****************************** Gadget Bind ******************************/
+
+static int __init hid_bind(struct usb_composite_dev *cdev)
+{
+	struct usb_gadget *gadget = cdev->gadget;
+	struct list_head *tmp;
+	int status, gcnum, funcs = 0;
+
+	list_for_each(tmp, &hidg_func_list)
+		funcs++;
+
+	if (!funcs)
+		return -ENODEV;
+
+	/* set up HID */
+	status = ghid_setup(cdev->gadget, funcs);
+	if (status < 0)
+		return status;
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+	else
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099);
+
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+
+	/* device descriptor strings: manufacturer, product */
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+		init_utsname()->sysname, init_utsname()->release,
+		gadget->name);
+	status = usb_string_id(cdev);
+	if (status < 0)
+		return status;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		return status;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	device_desc.iProduct = status;
+
+	/* register our configuration */
+	status = usb_add_config(cdev, &config_driver, do_config);
+	if (status < 0)
+		return status;
+
+	dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
+
+	return 0;
+}
+
+static int __exit hid_unbind(struct usb_composite_dev *cdev)
+{
+	ghid_cleanup();
+	return 0;
+}
+
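+/* Each HID function reaches this driver as a platform device named "hidg"
+ * whose platform data is a struct hidg_func_descriptor.  A minimal board-side
+ * sketch (the keyboard-style values below are only illustrative and are not
+ * taken from this file):
+ *
+ *	static struct hidg_func_descriptor my_hid_data = {
+ *		.subclass		= 0,
+ *		.protocol		= 1,		// boot keyboard
+ *		.report_length		= 8,
+ *		.report_desc_length	= 63,
+ *		.report_desc		= { ... },	// HID report descriptor
+ *	};
+ *
+ *	static struct platform_device my_hid = {
+ *		.name			= "hidg",
+ *		.id			= 0,
+ *		.dev.platform_data	= &my_hid_data,
+ *	};
+ */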
+static int __init hidg_plat_driver_probe(struct platform_device *pdev)
+{
+	struct hidg_func_descriptor *func = pdev->dev.platform_data;
+	struct hidg_func_node *entry;
+
+	if (!func) {
+		dev_err(&pdev->dev, "Platform data missing\n");
+		return -ENODEV;
+	}
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->func = func;
+	list_add_tail(&entry->node, &hidg_func_list);
+
+	return 0;
+}
+
+static int __devexit hidg_plat_driver_remove(struct platform_device *pdev)
+{
+	struct hidg_func_node *e, *n;
+
+	list_for_each_entry_safe(e, n, &hidg_func_list, node) {
+		list_del(&e->node);
+		kfree(e);
+	}
+
+	return 0;
+}
+
+
+/****************************** Some noise ******************************/
+
+
+static struct usb_composite_driver hidg_driver = {
+	.name		= "g_hid",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.max_speed	= USB_SPEED_HIGH,
+	.unbind		= __exit_p(hid_unbind),
+};
+
+static struct platform_driver hidg_plat_driver = {
+	.remove		= __devexit_p(hidg_plat_driver_remove),
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "hidg",
+	},
+};
+
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Fabien Chouteau, Peter Korsgaard");
+MODULE_LICENSE("GPL");
+
+static int __init hidg_init(void)
+{
+	int status;
+
+	status = platform_driver_probe(&hidg_plat_driver,
+				hidg_plat_driver_probe);
+	if (status < 0)
+		return status;
+
+	status = usb_composite_probe(&hidg_driver, hid_bind);
+	if (status < 0)
+		platform_driver_unregister(&hidg_plat_driver);
+
+	return status;
+}
+module_init(hidg_init);
+
+static void __exit hidg_cleanup(void)
+{
+	platform_driver_unregister(&hidg_plat_driver);
+	usb_composite_unregister(&hidg_driver);
+}
+module_exit(hidg_cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/imx_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/imx_udc.c
new file mode 100644
index 0000000..8d1c75a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/imx_udc.c
@@ -0,0 +1,1595 @@
+/*
+ *	drivers/usb/gadget/imx_udc.c
+ *
+ *	Copyright (C) 2005 Mike Lee <eemike@gmail.com>
+ *	Copyright (C) 2008 Darius Augulis <augulis.darius@gmail.com>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ *
+ *	This program is distributed in the hope that it will be useful,
+ *	but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *	GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include <mach/usb.h>
+#include <mach/hardware.h>
+
+#include "imx_udc.h"
+
+static const char driver_name[] = "imx_udc";
+static const char ep0name[] = "ep0";
+
+void ep0_chg_stat(const char *label, struct imx_udc_struct *imx_usb,
+							enum ep0_state stat);
+
+/*******************************************************************************
+ * IMX UDC hardware related functions
+ *******************************************************************************
+ */
+
+void imx_udc_enable(struct imx_udc_struct *imx_usb)
+{
+	int temp = __raw_readl(imx_usb->base + USB_CTRL);
+	__raw_writel(temp | CTRL_FE_ENA | CTRL_AFE_ENA,
+						imx_usb->base + USB_CTRL);
+	imx_usb->gadget.speed = USB_SPEED_FULL;
+}
+
+void imx_udc_disable(struct imx_udc_struct *imx_usb)
+{
+	int temp = __raw_readl(imx_usb->base + USB_CTRL);
+
+	__raw_writel(temp & ~(CTRL_FE_ENA | CTRL_AFE_ENA),
+		 imx_usb->base + USB_CTRL);
+
+	ep0_chg_stat(__func__, imx_usb, EP0_IDLE);
+	imx_usb->gadget.speed = USB_SPEED_UNKNOWN;
+}
+
+void imx_udc_reset(struct imx_udc_struct *imx_usb)
+{
+	int temp = __raw_readl(imx_usb->base + USB_ENAB);
+
+	/* set RST bit */
+	__raw_writel(temp | ENAB_RST, imx_usb->base + USB_ENAB);
+
+	/* wait RST bit to clear */
+	do {} while (__raw_readl(imx_usb->base + USB_ENAB) & ENAB_RST);
+
+	/* wait CFG bit to assert */
+	do {} while (!(__raw_readl(imx_usb->base + USB_DADR) & DADR_CFG));
+
+	/* udc module is now ready */
+}
+
+void imx_udc_config(struct imx_udc_struct *imx_usb)
+{
+	u8 ep_conf[5];
+	u8 i, j, cfg;
+	struct imx_ep_struct *imx_ep;
+
+	/* wait CFG bit to assert */
+	do {} while (!(__raw_readl(imx_usb->base + USB_DADR) & DADR_CFG));
+
+	/* Download the endpoint buffer for endpoint 0. */
+	for (j = 0; j < 5; j++) {
+		i = (j == 2 ? imx_usb->imx_ep[0].fifosize : 0x00);
+		__raw_writeb(i, imx_usb->base + USB_DDAT);
+		do {} while (__raw_readl(imx_usb->base + USB_DADR) & DADR_BSY);
+	}
+
+	/* Download the endpoint buffers for endpoints 1-5.
+	 * We specify two configurations, one interface
+	 */
+	for (cfg = 1; cfg < 3; cfg++) {
+		for (i = 1; i < IMX_USB_NB_EP; i++) {
+			imx_ep = &imx_usb->imx_ep[i];
+			/* EP no | Config no */
+			ep_conf[0] = (i << 4) | (cfg << 2);
+			/* Type | Direction */
+			ep_conf[1] = (imx_ep->bmAttributes << 3) |
+					(EP_DIR(imx_ep) << 2);
+			/* Max packet size */
+			ep_conf[2] = imx_ep->fifosize;
+			/* TRXTYP */
+			ep_conf[3] = 0xC0;
+			/* FIFO no */
+			ep_conf[4] = i;
+
+			D_INI(imx_usb->dev,
+				"<%s> ep%d_conf[%d]:"
+				"[%02x-%02x-%02x-%02x-%02x]\n",
+				__func__, i, cfg,
+				ep_conf[0], ep_conf[1], ep_conf[2],
+				ep_conf[3], ep_conf[4]);
+
+			for (j = 0; j < 5; j++) {
+				__raw_writeb(ep_conf[j],
+					imx_usb->base + USB_DDAT);
+				do {} while (__raw_readl(imx_usb->base
+								+ USB_DADR)
+					& DADR_BSY);
+			}
+		}
+	}
+
+	/* wait CFG bit to clear */
+	do {} while (__raw_readl(imx_usb->base + USB_DADR) & DADR_CFG);
+}
+
+void imx_udc_init_irq(struct imx_udc_struct *imx_usb)
+{
+	int i;
+
+	/* Mask and clear all irqs */
+	__raw_writel(0xFFFFFFFF, imx_usb->base + USB_MASK);
+	__raw_writel(0xFFFFFFFF, imx_usb->base + USB_INTR);
+	for (i = 0; i < IMX_USB_NB_EP; i++) {
+		__raw_writel(0x1FF, imx_usb->base + USB_EP_MASK(i));
+		__raw_writel(0x1FF, imx_usb->base + USB_EP_INTR(i));
+	}
+
+	/* Enable USB irqs */
+	__raw_writel(INTR_MSOF | INTR_FRAME_MATCH, imx_usb->base + USB_MASK);
+
+	/* Enable EP0 irqs */
+	__raw_writel(0x1FF & ~(EPINTR_DEVREQ | EPINTR_MDEVREQ | EPINTR_EOT
+		| EPINTR_EOF | EPINTR_FIFO_EMPTY | EPINTR_FIFO_FULL),
+		imx_usb->base + USB_EP_MASK(0));
+}
+
+void imx_udc_init_ep(struct imx_udc_struct *imx_usb)
+{
+	int i, max, temp;
+	struct imx_ep_struct *imx_ep;
+	for (i = 0; i < IMX_USB_NB_EP; i++) {
+		imx_ep = &imx_usb->imx_ep[i];
+		switch (imx_ep->fifosize) {
+		case 8:
+			max = 0;
+			break;
+		case 16:
+			max = 1;
+			break;
+		case 32:
+			max = 2;
+			break;
+		case 64:
+			max = 3;
+			break;
+		default:
+			max = 1;
+			break;
+		}
+		temp = (EP_DIR(imx_ep) << 7) | (max << 5)
+			| (imx_ep->bmAttributes << 3);
+		__raw_writel(temp, imx_usb->base + USB_EP_STAT(i));
+		__raw_writel(temp | EPSTAT_FLUSH,
+						imx_usb->base + USB_EP_STAT(i));
+		D_INI(imx_usb->dev, "<%s> ep%d_stat %08x\n", __func__, i,
+			__raw_readl(imx_usb->base + USB_EP_STAT(i)));
+	}
+}
+
+void imx_udc_init_fifo(struct imx_udc_struct *imx_usb)
+{
+	int i, temp;
+	struct imx_ep_struct *imx_ep;
+	for (i = 0; i < IMX_USB_NB_EP; i++) {
+		imx_ep = &imx_usb->imx_ep[i];
+
+		/* Fifo control */
+		temp = EP_DIR(imx_ep) ? 0x0B000000 : 0x0F000000;
+		__raw_writel(temp, imx_usb->base + USB_EP_FCTRL(i));
+		D_INI(imx_usb->dev, "<%s> ep%d_fctrl %08x\n", __func__, i,
+			__raw_readl(imx_usb->base + USB_EP_FCTRL(i)));
+
+		/* Fifo alarm */
+		temp = (i ? imx_ep->fifosize / 2 : 0);
+		__raw_writel(temp, imx_usb->base + USB_EP_FALRM(i));
+		D_INI(imx_usb->dev, "<%s> ep%d_falrm %08x\n", __func__, i,
+			__raw_readl(imx_usb->base + USB_EP_FALRM(i)));
+	}
+}
+
+static void imx_udc_init(struct imx_udc_struct *imx_usb)
+{
+	/* Reset UDC */
+	imx_udc_reset(imx_usb);
+
+	/* Download config to endpoint buffer */
+	imx_udc_config(imx_usb);
+
+	/* Setup interrupts */
+	imx_udc_init_irq(imx_usb);
+
+	/* Setup endpoints */
+	imx_udc_init_ep(imx_usb);
+
+	/* Setup fifos */
+	imx_udc_init_fifo(imx_usb);
+}
+
+void imx_ep_irq_enable(struct imx_ep_struct *imx_ep)
+{
+
+	int i = EP_NO(imx_ep);
+
+	__raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_MASK(i));
+	__raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_INTR(i));
+	__raw_writel(0x1FF & ~(EPINTR_EOT | EPINTR_EOF),
+		imx_ep->imx_usb->base + USB_EP_MASK(i));
+}
+
+void imx_ep_irq_disable(struct imx_ep_struct *imx_ep)
+{
+
+	int i = EP_NO(imx_ep);
+
+	__raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_MASK(i));
+	__raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_INTR(i));
+}
+
+int imx_ep_empty(struct imx_ep_struct *imx_ep)
+{
+	struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+
+	return __raw_readl(imx_usb->base + USB_EP_FSTAT(EP_NO(imx_ep)))
+			& FSTAT_EMPTY;
+}
+
+unsigned imx_fifo_bcount(struct imx_ep_struct *imx_ep)
+{
+	struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+
+	return (__raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)))
+			& EPSTAT_BCOUNT) >> 16;
+}
+
+void imx_flush(struct imx_ep_struct *imx_ep)
+{
+	struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+
+	int temp = __raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+	__raw_writel(temp | EPSTAT_FLUSH,
+		imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+}
+
+void imx_ep_stall(struct imx_ep_struct *imx_ep)
+{
+	struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+	int temp, i;
+
+	D_ERR(imx_usb->dev,
+		"<%s> Forced stall on %s\n", __func__, imx_ep->ep.name);
+
+	imx_flush(imx_ep);
+
+	/* Special care for ep0 */
+	if (!EP_NO(imx_ep)) {
+		temp = __raw_readl(imx_usb->base + USB_CTRL);
+		__raw_writel(temp | CTRL_CMDOVER | CTRL_CMDERROR,
+						imx_usb->base + USB_CTRL);
+		do { } while (__raw_readl(imx_usb->base + USB_CTRL)
+						& CTRL_CMDOVER);
+		temp = __raw_readl(imx_usb->base + USB_CTRL);
+		__raw_writel(temp & ~CTRL_CMDERROR, imx_usb->base + USB_CTRL);
+	}
+	else {
+		temp = __raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+		__raw_writel(temp | EPSTAT_STALL,
+			imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+
+		for (i = 0; i < 100; i ++) {
+			temp = __raw_readl(imx_usb->base
+						+ USB_EP_STAT(EP_NO(imx_ep)));
+			if (!(temp & EPSTAT_STALL))
+	 			break;
+	 		udelay(20);
+	 	}
+		if (i == 100)
+			D_ERR(imx_usb->dev, "<%s> Non finished stall on %s\n",
+				__func__, imx_ep->ep.name);
+	}
+}
+
+static int imx_udc_get_frame(struct usb_gadget *_gadget)
+{
+	struct imx_udc_struct *imx_usb = container_of(_gadget,
+			struct imx_udc_struct, gadget);
+
+	return __raw_readl(imx_usb->base + USB_FRAME) & 0x7FF;
+}
+
+static int imx_udc_wakeup(struct usb_gadget *_gadget)
+{
+	return 0;
+}
+
+/*******************************************************************************
+ * USB request control functions
+ *******************************************************************************
+ */
+
+static void ep_add_request(struct imx_ep_struct *imx_ep,
+							struct imx_request *req)
+{
+	if (unlikely(!req))
+		return;
+
+	req->in_use = 1;
+	list_add_tail(&req->queue, &imx_ep->queue);
+}
+
+static void ep_del_request(struct imx_ep_struct *imx_ep,
+							struct imx_request *req)
+{
+	if (unlikely(!req))
+		return;
+
+	list_del_init(&req->queue);
+	req->in_use = 0;
+}
+
+static void done(struct imx_ep_struct *imx_ep,
+					struct imx_request *req, int status)
+{
+	ep_del_request(imx_ep, req);
+
+	if (likely(req->req.status == -EINPROGRESS))
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	if (status && status != -ESHUTDOWN)
+		D_ERR(imx_ep->imx_usb->dev,
+			"<%s> complete %s req %p stat %d len %u/%u\n", __func__,
+			imx_ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	req->req.complete(&imx_ep->ep, &req->req);
+}
+
+static void nuke(struct imx_ep_struct *imx_ep, int status)
+{
+	struct imx_request *req;
+
+	while (!list_empty(&imx_ep->queue)) {
+		req = list_entry(imx_ep->queue.next, struct imx_request, queue);
+		done(imx_ep, req, status);
+	}
+}
+
+/*******************************************************************************
+ * Data transfer over USB functions
+ *******************************************************************************
+ */
+static int read_packet(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+	u8	*buf;
+	int	bytes_ep, bufferspace, count, i;
+
+	bytes_ep = imx_fifo_bcount(imx_ep);
+	bufferspace = req->req.length - req->req.actual;
+
+	buf = req->req.buf + req->req.actual;
+	prefetchw(buf);
+
+	if (unlikely(imx_ep_empty(imx_ep)))
+		count = 0;	/* zlp */
+	else
+		count = min(bytes_ep, bufferspace);
+
+	for (i = count; i > 0; i--)
+		*buf++ = __raw_readb(imx_ep->imx_usb->base
+						+ USB_EP_FDAT0(EP_NO(imx_ep)));
+	req->req.actual += count;
+
+	return count;
+}
+
+static int write_packet(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+	u8	*buf;
+	int	length, count, temp;
+
+	if (unlikely(__raw_readl(imx_ep->imx_usb->base +
+				 USB_EP_STAT(EP_NO(imx_ep))) & EPSTAT_ZLPS)) {
+		D_TRX(imx_ep->imx_usb->dev, "<%s> zlp still queued in EP %s\n",
+			__func__, imx_ep->ep.name);
+		return -1;
+	}
+
+	buf = req->req.buf + req->req.actual;
+	prefetch(buf);
+
+	length = min(req->req.length - req->req.actual, (u32)imx_ep->fifosize);
+
+	if (imx_fifo_bcount(imx_ep) + length > imx_ep->fifosize) {
+		D_TRX(imx_ep->imx_usb->dev, "<%s> packet overfill %s fifo\n",
+			__func__, imx_ep->ep.name);
+		return -1;
+	}
+
+	req->req.actual += length;
+	count = length;
+
+	if (!count && req->req.zero) {	/* zlp */
+		temp = __raw_readl(imx_ep->imx_usb->base
+			+ USB_EP_STAT(EP_NO(imx_ep)));
+		__raw_writel(temp | EPSTAT_ZLPS, imx_ep->imx_usb->base
+			+ USB_EP_STAT(EP_NO(imx_ep)));
+		D_TRX(imx_ep->imx_usb->dev, "<%s> zero packet\n", __func__);
+		return 0;
+	}
+
+	while (count--) {
+		if (count == 0) {	/* last byte */
+			temp = __raw_readl(imx_ep->imx_usb->base
+				+ USB_EP_FCTRL(EP_NO(imx_ep)));
+			__raw_writel(temp | FCTRL_WFR, imx_ep->imx_usb->base
+				+ USB_EP_FCTRL(EP_NO(imx_ep)));
+		}
+		__raw_writeb(*buf++,
+			imx_ep->imx_usb->base + USB_EP_FDAT0(EP_NO(imx_ep)));
+	}
+
+	return length;
+}
+
+static int read_fifo(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+	int 	bytes = 0,
+		count,
+		completed = 0;
+
+	while (__raw_readl(imx_ep->imx_usb->base + USB_EP_FSTAT(EP_NO(imx_ep)))
+		& FSTAT_FR) {
+			count = read_packet(imx_ep, req);
+			bytes += count;
+
+			completed = (count != imx_ep->fifosize);
+			if (completed || req->req.actual == req->req.length) {
+				completed = 1;
+				break;
+			}
+	}
+
+	if (completed || !req->req.length) {
+		done(imx_ep, req, 0);
+		D_REQ(imx_ep->imx_usb->dev, "<%s> %s req<%p> %s\n",
+			__func__, imx_ep->ep.name, req,
+			completed ? "completed" : "not completed");
+		if (!EP_NO(imx_ep))
+			ep0_chg_stat(__func__, imx_ep->imx_usb, EP0_IDLE);
+	}
+
+	D_TRX(imx_ep->imx_usb->dev, "<%s> bytes read: %d\n", __func__, bytes);
+
+	return completed;
+}
+
+static int write_fifo(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+	int	bytes = 0,
+		count,
+		completed = 0;
+
+	while (!completed) {
+		count = write_packet(imx_ep, req);
+		if (count < 0)
+			break; /* busy */
+		bytes += count;
+
+		/* last packet "must be" short (or a zlp) */
+		completed = (count != imx_ep->fifosize);
+
+		if (unlikely(completed)) {
+			done(imx_ep, req, 0);
+			D_REQ(imx_ep->imx_usb->dev, "<%s> %s req<%p> %s\n",
+				__func__, imx_ep->ep.name, req,
+				completed ? "completed" : "not completed");
+			if (!EP_NO(imx_ep))
+				ep0_chg_stat(__func__,
+						imx_ep->imx_usb, EP0_IDLE);
+		}
+	}
+
+	D_TRX(imx_ep->imx_usb->dev, "<%s> bytes sent: %d\n", __func__, bytes);
+
+	return completed;
+}
+
+/*******************************************************************************
+ * Endpoint handlers
+ *******************************************************************************
+ */
+static int handle_ep(struct imx_ep_struct *imx_ep)
+{
+	struct imx_request *req;
+	int completed = 0;
+
+	do {
+		if (!list_empty(&imx_ep->queue))
+			req = list_entry(imx_ep->queue.next,
+				struct imx_request, queue);
+		else {
+			D_REQ(imx_ep->imx_usb->dev, "<%s> no request on %s\n",
+				__func__, imx_ep->ep.name);
+			return 0;
+		}
+
+		if (EP_DIR(imx_ep))	/* to host */
+			completed = write_fifo(imx_ep, req);
+		else			/* to device */
+			completed = read_fifo(imx_ep, req);
+
+		dump_ep_stat(__func__, imx_ep);
+
+	} while (completed);
+
+	return 0;
+}
+
+static int handle_ep0(struct imx_ep_struct *imx_ep)
+{
+	struct imx_request *req = NULL;
+	int ret = 0;
+
+	if (!list_empty(&imx_ep->queue)) {
+		req = list_entry(imx_ep->queue.next, struct imx_request, queue);
+
+		switch (imx_ep->imx_usb->ep0state) {
+
+		case EP0_IN_DATA_PHASE:			/* GET_DESCRIPTOR */
+			write_fifo(imx_ep, req);
+			break;
+		case EP0_OUT_DATA_PHASE:		/* SET_DESCRIPTOR */
+			read_fifo(imx_ep, req);
+			break;
+		default:
+			D_EP0(imx_ep->imx_usb->dev,
+				"<%s> ep0 i/o, odd state %d\n",
+				__func__, imx_ep->imx_usb->ep0state);
+			ep_del_request(imx_ep, req);
+			ret = -EL2HLT;
+			break;
+		}
+	}
+
+	else
+		D_ERR(imx_ep->imx_usb->dev, "<%s> no request on %s\n",
+						__func__, imx_ep->ep.name);
+
+	return ret;
+}
+
+static void handle_ep0_devreq(struct imx_udc_struct *imx_usb)
+{
+	struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[0];
+	union {
+		struct usb_ctrlrequest	r;
+		u8			raw[8];
+		u32			word[2];
+	} u;
+	int temp, i;
+
+	nuke(imx_ep, -EPROTO);
+
+	/* read SETUP packet */
+	for (i = 0; i < 2; i++) {
+		if (imx_ep_empty(imx_ep)) {
+			D_ERR(imx_usb->dev,
+				"<%s> no setup packet received\n", __func__);
+			goto stall;
+		}
+		u.word[i] = __raw_readl(imx_usb->base
+						+ USB_EP_FDAT(EP_NO(imx_ep)));
+	}
+
+	temp = imx_ep_empty(imx_ep);
+	while (!imx_ep_empty(imx_ep)) {
+		i = __raw_readl(imx_usb->base +	USB_EP_FDAT(EP_NO(imx_ep)));
+		D_ERR(imx_usb->dev,
+			"<%s> wrong to have extra bytes for setup : 0x%08x\n",
+			__func__, i);
+	}
+	if (!temp)
+		goto stall;
+
+	le16_to_cpus(&u.r.wValue);
+	le16_to_cpus(&u.r.wIndex);
+	le16_to_cpus(&u.r.wLength);
+
+	D_REQ(imx_usb->dev, "<%s> SETUP %02x.%02x v%04x i%04x l%04x\n",
+		__func__, u.r.bRequestType, u.r.bRequest,
+		u.r.wValue, u.r.wIndex, u.r.wLength);
+
+	if (imx_usb->set_config) {
+		/* NACK the host by using CMDOVER */
+		temp = __raw_readl(imx_usb->base + USB_CTRL);
+		__raw_writel(temp | CTRL_CMDOVER, imx_usb->base + USB_CTRL);
+
+		D_ERR(imx_usb->dev,
+			"<%s> set config req is pending, NACK the host\n",
+			__func__);
+		return;
+	}
+
+	if (u.r.bRequestType & USB_DIR_IN)
+		ep0_chg_stat(__func__, imx_usb, EP0_IN_DATA_PHASE);
+	else
+		ep0_chg_stat(__func__, imx_usb, EP0_OUT_DATA_PHASE);
+
+	i = imx_usb->driver->setup(&imx_usb->gadget, &u.r);
+	if (i < 0) {
+		D_ERR(imx_usb->dev, "<%s> device setup error %d\n",
+			__func__, i);
+		goto stall;
+	}
+
+	return;
+stall:
+	D_ERR(imx_usb->dev, "<%s> protocol STALL\n", __func__);
+	imx_ep_stall(imx_ep);
+	ep0_chg_stat(__func__, imx_usb, EP0_STALL);
+	return;
+}
+
+/*******************************************************************************
+ * USB gadget callback functions
+ *******************************************************************************
+ */
+
+static int imx_ep_enable(struct usb_ep *usb_ep,
+				const struct usb_endpoint_descriptor *desc)
+{
+	struct imx_ep_struct *imx_ep = container_of(usb_ep,
+						struct imx_ep_struct, ep);
+	struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+	unsigned long flags;
+
+	if (!usb_ep
+		|| !desc
+		|| !EP_NO(imx_ep)
+		|| desc->bDescriptorType != USB_DT_ENDPOINT
+		|| imx_ep->bEndpointAddress != desc->bEndpointAddress) {
+			D_ERR(imx_usb->dev,
+				"<%s> bad ep or descriptor\n", __func__);
+			return -EINVAL;
+	}
+
+	if (imx_ep->bmAttributes != desc->bmAttributes) {
+		D_ERR(imx_usb->dev,
+			"<%s> %s type mismatch\n", __func__, usb_ep->name);
+		return -EINVAL;
+	}
+
+	if (imx_ep->fifosize < usb_endpoint_maxp(desc)) {
+		D_ERR(imx_usb->dev,
+			"<%s> bad %s maxpacket\n", __func__, usb_ep->name);
+		return -ERANGE;
+	}
+
+	if (!imx_usb->driver || imx_usb->gadget.speed == USB_SPEED_UNKNOWN) {
+		D_ERR(imx_usb->dev, "<%s> bogus device state\n", __func__);
+		return -ESHUTDOWN;
+	}
+
+	local_irq_save(flags);
+
+	imx_ep->stopped = 0;
+	imx_flush(imx_ep);
+	imx_ep_irq_enable(imx_ep);
+
+	local_irq_restore(flags);
+
+	D_EPX(imx_usb->dev, "<%s> ENABLED %s\n", __func__, usb_ep->name);
+	return 0;
+}
+
+static int imx_ep_disable(struct usb_ep *usb_ep)
+{
+	struct imx_ep_struct *imx_ep = container_of(usb_ep,
+						struct imx_ep_struct, ep);
+	unsigned long flags;
+
+	if (!usb_ep || !EP_NO(imx_ep) || !list_empty(&imx_ep->queue)) {
+		D_ERR(imx_ep->imx_usb->dev, "<%s> %s can not be disabled\n",
+			__func__, usb_ep ? imx_ep->ep.name : NULL);
+		return -EINVAL;
+	}
+
+	local_irq_save(flags);
+
+	imx_ep->stopped = 1;
+	nuke(imx_ep, -ESHUTDOWN);
+	imx_flush(imx_ep);
+	imx_ep_irq_disable(imx_ep);
+
+	local_irq_restore(flags);
+
+	D_EPX(imx_ep->imx_usb->dev,
+		"<%s> DISABLED %s\n", __func__, usb_ep->name);
+	return 0;
+}
+
+static struct usb_request *imx_ep_alloc_request
+					(struct usb_ep *usb_ep, gfp_t gfp_flags)
+{
+	struct imx_request *req;
+
+	if (!usb_ep)
+		return NULL;
+
+	req = kzalloc(sizeof *req, gfp_flags);
+	if (!req)
+		return NULL;
+
+	INIT_LIST_HEAD(&req->queue);
+	req->in_use = 0;
+
+	return &req->req;
+}
+
+static void imx_ep_free_request
+			(struct usb_ep *usb_ep, struct usb_request *usb_req)
+{
+	struct imx_request *req;
+
+	req = container_of(usb_req, struct imx_request, req);
+	WARN_ON(!list_empty(&req->queue));
+	kfree(req);
+}
+
+static int imx_ep_queue
+	(struct usb_ep *usb_ep, struct usb_request *usb_req, gfp_t gfp_flags)
+{
+	struct imx_ep_struct	*imx_ep;
+	struct imx_udc_struct	*imx_usb;
+	struct imx_request	*req;
+	unsigned long		flags;
+	int			ret = 0;
+
+	imx_ep = container_of(usb_ep, struct imx_ep_struct, ep);
+	imx_usb = imx_ep->imx_usb;
+	req = container_of(usb_req, struct imx_request, req);
+
+	/*
+	 * Special care for the i.MX UDC: ignore the ep0 enqueue that follows
+	 * a set-configuration from the host.  This assumes all gadget drivers
+	 * reply to set configuration with their next ep0 request enqueue.
+	 */
+	if (imx_usb->set_config && !EP_NO(imx_ep)) {
+		imx_usb->set_config = 0;
+		D_ERR(imx_usb->dev,
+			"<%s> gadget reply set config\n", __func__);
+		return 0;
+	}
+
+	if (unlikely(!usb_req || !req || !usb_req->complete || !usb_req->buf)) {
+		D_ERR(imx_usb->dev, "<%s> bad params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (unlikely(!usb_ep || !imx_ep)) {
+		D_ERR(imx_usb->dev, "<%s> bad ep\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!imx_usb->driver || imx_usb->gadget.speed == USB_SPEED_UNKNOWN) {
+		D_ERR(imx_usb->dev, "<%s> bogus device state\n", __func__);
+		return -ESHUTDOWN;
+	}
+
+	/* Debug */
+	D_REQ(imx_usb->dev, "<%s> ep%d %s request for [%d] bytes\n",
+		__func__, EP_NO(imx_ep),
+		((!EP_NO(imx_ep) && imx_ep->imx_usb->ep0state
+							== EP0_IN_DATA_PHASE)
+		|| (EP_NO(imx_ep) && EP_DIR(imx_ep)))
+					? "IN" : "OUT", usb_req->length);
+	dump_req(__func__, imx_ep, usb_req);
+
+	if (imx_ep->stopped) {
+		usb_req->status = -ESHUTDOWN;
+		return -ESHUTDOWN;
+	}
+
+	if (req->in_use) {
+		D_ERR(imx_usb->dev,
+			"<%s> refusing to queue req %p (already queued)\n",
+			__func__, req);
+		return 0;
+	}
+
+	local_irq_save(flags);
+
+	usb_req->status = -EINPROGRESS;
+	usb_req->actual = 0;
+
+	ep_add_request(imx_ep, req);
+
+	if (!EP_NO(imx_ep))
+		ret = handle_ep0(imx_ep);
+	else
+		ret = handle_ep(imx_ep);
+
+	local_irq_restore(flags);
+	return ret;
+}
+
+static int imx_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
+{
+
+	struct imx_ep_struct *imx_ep = container_of
+					(usb_ep, struct imx_ep_struct, ep);
+	struct imx_request *req;
+	unsigned long flags;
+
+	if (unlikely(!usb_ep || !EP_NO(imx_ep))) {
+		D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
+		return -EINVAL;
+	}
+
+	local_irq_save(flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry(req, &imx_ep->queue, queue) {
+		if (&req->req == usb_req)
+			break;
+	}
+	if (&req->req != usb_req) {
+		local_irq_restore(flags);
+		return -EINVAL;
+	}
+
+	done(imx_ep, req, -ECONNRESET);
+
+	local_irq_restore(flags);
+	return 0;
+}
+
+static int imx_ep_set_halt(struct usb_ep *usb_ep, int value)
+{
+	struct imx_ep_struct *imx_ep = container_of
+					(usb_ep, struct imx_ep_struct, ep);
+	unsigned long flags;
+
+	if (unlikely(!usb_ep || !EP_NO(imx_ep))) {
+		D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
+		return -EINVAL;
+	}
+
+	local_irq_save(flags);
+
+	if ((imx_ep->bEndpointAddress & USB_DIR_IN)
+		&& !list_empty(&imx_ep->queue)) {
+			local_irq_restore(flags);
+			return -EAGAIN;
+	}
+
+	imx_ep_stall(imx_ep);
+
+	local_irq_restore(flags);
+
+	D_EPX(imx_ep->imx_usb->dev, "<%s> %s halt\n", __func__, usb_ep->name);
+	return 0;
+}
+
+static int imx_ep_fifo_status(struct usb_ep *usb_ep)
+{
+	struct imx_ep_struct *imx_ep = container_of
+					(usb_ep, struct imx_ep_struct, ep);
+
+	if (!usb_ep) {
+		D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
+		return -ENODEV;
+	}
+
+	if (imx_ep->imx_usb->gadget.speed == USB_SPEED_UNKNOWN)
+		return 0;
+	else
+		return imx_fifo_bcount(imx_ep);
+}
+
+static void imx_ep_fifo_flush(struct usb_ep *usb_ep)
+{
+	struct imx_ep_struct *imx_ep = container_of
+					(usb_ep, struct imx_ep_struct, ep);
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	if (!usb_ep || !EP_NO(imx_ep) || !list_empty(&imx_ep->queue)) {
+		D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
+		local_irq_restore(flags);
+		return;
+	}
+
+	/* toggle and halt bits stay unchanged */
+	imx_flush(imx_ep);
+
+	local_irq_restore(flags);
+}
+
+static struct usb_ep_ops imx_ep_ops = {
+	.enable		= imx_ep_enable,
+	.disable	= imx_ep_disable,
+
+	.alloc_request	= imx_ep_alloc_request,
+	.free_request	= imx_ep_free_request,
+
+	.queue		= imx_ep_queue,
+	.dequeue	= imx_ep_dequeue,
+
+	.set_halt	= imx_ep_set_halt,
+	.fifo_status	= imx_ep_fifo_status,
+	.fifo_flush	= imx_ep_fifo_flush,
+};
+
+/*******************************************************************************
+ * USB endpoint control functions
+ *******************************************************************************
+ */
+
+void ep0_chg_stat(const char *label,
+			struct imx_udc_struct *imx_usb, enum ep0_state stat)
+{
+	D_EP0(imx_usb->dev, "<%s> from %15s to %15s\n",
+		label, state_name[imx_usb->ep0state], state_name[stat]);
+
+	if (imx_usb->ep0state == stat)
+		return;
+
+	imx_usb->ep0state = stat;
+}
+
+static void usb_init_data(struct imx_udc_struct *imx_usb)
+{
+	struct imx_ep_struct *imx_ep;
+	u8 i;
+
+	/* device/ep0 records init */
+	INIT_LIST_HEAD(&imx_usb->gadget.ep_list);
+	INIT_LIST_HEAD(&imx_usb->gadget.ep0->ep_list);
+	ep0_chg_stat(__func__, imx_usb, EP0_IDLE);
+
+	/* basic endpoint records init */
+	for (i = 0; i < IMX_USB_NB_EP; i++) {
+		imx_ep = &imx_usb->imx_ep[i];
+
+		if (i) {
+			list_add_tail(&imx_ep->ep.ep_list,
+				&imx_usb->gadget.ep_list);
+			imx_ep->stopped = 1;
+		} else
+			imx_ep->stopped = 0;
+
+		INIT_LIST_HEAD(&imx_ep->queue);
+	}
+}
+
+static void udc_stop_activity(struct imx_udc_struct *imx_usb,
+					struct usb_gadget_driver *driver)
+{
+	struct imx_ep_struct *imx_ep;
+	int i;
+
+	if (imx_usb->gadget.speed == USB_SPEED_UNKNOWN)
+		driver = NULL;
+
+	/* prevent new request submissions, kill any outstanding requests  */
+	for (i = 1; i < IMX_USB_NB_EP; i++) {
+		imx_ep = &imx_usb->imx_ep[i];
+		imx_flush(imx_ep);
+		imx_ep->stopped = 1;
+		imx_ep_irq_disable(imx_ep);
+		nuke(imx_ep, -ESHUTDOWN);
+	}
+
+	imx_usb->cfg = 0;
+	imx_usb->intf = 0;
+	imx_usb->alt = 0;
+
+	if (driver)
+		driver->disconnect(&imx_usb->gadget);
+}
+
+/*******************************************************************************
+ * Interrupt handlers
+ *******************************************************************************
+ */
+
+/*
+ * Called when the timer expires.
+ * The timer is started when CFG_CHG is received.
+ */
+static void handle_config(unsigned long data)
+{
+	struct imx_udc_struct *imx_usb = (void *)data;
+	struct usb_ctrlrequest u;
+	int temp, cfg, intf, alt;
+
+	local_irq_disable();
+
+	temp = __raw_readl(imx_usb->base + USB_STAT);
+	cfg  = (temp & STAT_CFG) >> 5;
+	intf = (temp & STAT_INTF) >> 3;
+	alt  =  temp & STAT_ALTSET;
+
+	D_REQ(imx_usb->dev,
+		"<%s> orig config C=%d, I=%d, A=%d / "
+		"req config C=%d, I=%d, A=%d\n",
+		__func__, imx_usb->cfg, imx_usb->intf, imx_usb->alt,
+		cfg, intf, alt);
+
+	if (cfg == 1 || cfg == 2) {
+
+		if (imx_usb->cfg != cfg) {
+			u.bRequest = USB_REQ_SET_CONFIGURATION;
+			u.bRequestType = USB_DIR_OUT |
+					USB_TYPE_STANDARD |
+					USB_RECIP_DEVICE;
+			u.wValue = cfg;
+			u.wIndex = 0;
+			u.wLength = 0;
+			imx_usb->cfg = cfg;
+			imx_usb->driver->setup(&imx_usb->gadget, &u);
+
+		}
+		if (imx_usb->intf != intf || imx_usb->alt != alt) {
+			u.bRequest = USB_REQ_SET_INTERFACE;
+			u.bRequestType = USB_DIR_OUT |
+					  USB_TYPE_STANDARD |
+					  USB_RECIP_INTERFACE;
+			u.wValue = alt;
+			u.wIndex = intf;
+			u.wLength = 0;
+			imx_usb->intf = intf;
+			imx_usb->alt = alt;
+			imx_usb->driver->setup(&imx_usb->gadget, &u);
+		}
+	}
+
+	imx_usb->set_config = 0;
+
+	local_irq_enable();
+}
+
+static irqreturn_t imx_udc_irq(int irq, void *dev)
+{
+	struct imx_udc_struct *imx_usb = dev;
+	int intr = __raw_readl(imx_usb->base + USB_INTR);
+	int temp;
+
+	if (intr & (INTR_WAKEUP | INTR_SUSPEND | INTR_RESUME | INTR_RESET_START
+			| INTR_RESET_STOP | INTR_CFG_CHG)) {
+				dump_intr(__func__, intr, imx_usb->dev);
+				dump_usb_stat(__func__, imx_usb);
+	}
+
+	if (!imx_usb->driver)
+		goto end_irq;
+
+	if (intr & INTR_SOF) {
+		/* Copied from the Freescale BSP:
+		   we must enable the SOF interrupt and set CMDOVER.
+		   The datasheet doesn't specify this action, but the
+		   Freescale BSP does it, so we copy it.
+		*/
+		if (imx_usb->ep0state == EP0_IDLE) {
+			temp = __raw_readl(imx_usb->base + USB_CTRL);
+			__raw_writel(temp | CTRL_CMDOVER,
+						imx_usb->base + USB_CTRL);
+		}
+	}
+
+	if (intr & INTR_CFG_CHG) {
+		/* A workaround for a serious i.MX UDC bug.
+		   Handling of CFG_CHG must be delayed for some time, because
+		   the i.MX does not NACK the host while a CFG_CHG interrupt
+		   is pending.  There is no time to handle the current CFG_CHG
+		   if the next CFG_CHG or a SETUP packet arrives immediately.
+		   So we clear CFG_CHG, start the timer, and NACK the host by
+		   setting CTRL_CMDOVER if it sends any SETUP packet.
+		   When the timer expires, its handler applies the pending
+		   configuration change.  While CFG_CHG is not yet handled
+		   (set_config = 1), we must NACK every SETUP packet from the
+		   host.  This delay keeps us from going out of sync with the
+		   host.
+		 */
+		__raw_writel(INTR_CFG_CHG, imx_usb->base + USB_INTR);
+		imx_usb->set_config = 1;
+		mod_timer(&imx_usb->timer, jiffies + 5);
+		goto end_irq;
+	}
+
+	if (intr & INTR_WAKEUP) {
+		if (imx_usb->gadget.speed == USB_SPEED_UNKNOWN
+			&& imx_usb->driver && imx_usb->driver->resume)
+				imx_usb->driver->resume(&imx_usb->gadget);
+		imx_usb->set_config = 0;
+		del_timer(&imx_usb->timer);
+		imx_usb->gadget.speed = USB_SPEED_FULL;
+	}
+
+	if (intr & INTR_SUSPEND) {
+		if (imx_usb->gadget.speed != USB_SPEED_UNKNOWN
+			&& imx_usb->driver && imx_usb->driver->suspend)
+				imx_usb->driver->suspend(&imx_usb->gadget);
+		imx_usb->set_config = 0;
+		del_timer(&imx_usb->timer);
+		imx_usb->gadget.speed = USB_SPEED_UNKNOWN;
+	}
+
+	if (intr & INTR_RESET_START) {
+		__raw_writel(intr, imx_usb->base + USB_INTR);
+		udc_stop_activity(imx_usb, imx_usb->driver);
+		imx_usb->set_config = 0;
+		del_timer(&imx_usb->timer);
+		imx_usb->gadget.speed = USB_SPEED_UNKNOWN;
+	}
+
+	if (intr & INTR_RESET_STOP)
+		imx_usb->gadget.speed = USB_SPEED_FULL;
+
+end_irq:
+	__raw_writel(intr, imx_usb->base + USB_INTR);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t imx_udc_ctrl_irq(int irq, void *dev)
+{
+	struct imx_udc_struct *imx_usb = dev;
+	struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[0];
+	int intr = __raw_readl(imx_usb->base + USB_EP_INTR(0));
+
+	dump_ep_intr(__func__, 0, intr, imx_usb->dev);
+
+	if (!imx_usb->driver) {
+		__raw_writel(intr, imx_usb->base + USB_EP_INTR(0));
+		return IRQ_HANDLED;
+	}
+
+	/* DEVREQ has highest priority */
+	if (intr & (EPINTR_DEVREQ | EPINTR_MDEVREQ))
+		handle_ep0_devreq(imx_usb);
+	/* The i.MX seems to miss the EOF interrupt sometimes, so we don't
+	 * monitor EOF.  We call handle_ep0() only if a request is queued
+	 * for ep0.
+	 */
+	else if (!list_empty(&imx_ep->queue))
+		handle_ep0(imx_ep);
+
+	__raw_writel(intr, imx_usb->base + USB_EP_INTR(0));
+
+	return IRQ_HANDLED;
+}
+
+#ifndef MX1_INT_USBD0
+#define MX1_INT_USBD0 MX1_USBD_INT0
+#endif
+
+static irqreturn_t imx_udc_bulk_irq(int irq, void *dev)
+{
+	struct imx_udc_struct *imx_usb = dev;
+	struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[irq - MX1_INT_USBD0];
+	int intr = __raw_readl(imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
+
+	dump_ep_intr(__func__, irq - MX1_INT_USBD0, intr, imx_usb->dev);
+
+	if (!imx_usb->driver) {
+		__raw_writel(intr, imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
+		return IRQ_HANDLED;
+	}
+
+	handle_ep(imx_ep);
+
+	__raw_writel(intr, imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
+
+	return IRQ_HANDLED;
+}
+
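+/* The platform device is expected to provide IMX_USB_NB_EP + 1 interrupt
+ * lines (see imx_udc_probe() below): index 0 selects the ep0 control handler,
+ * indices 1..5 the per-endpoint bulk/interrupt handler, and anything else
+ * falls back to the device-level handler imx_udc_irq().
+ */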
+irq_handler_t intr_handler(int i)
+{
+	switch (i) {
+	case 0:
+		return imx_udc_ctrl_irq;
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+		return imx_udc_bulk_irq;
+	default:
+		return imx_udc_irq;
+	}
+}
+
+/*******************************************************************************
+ * Static defined IMX UDC structure
+ *******************************************************************************
+ */
+
+static int imx_udc_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *));
+static int imx_udc_stop(struct usb_gadget_driver *driver);
+static const struct usb_gadget_ops imx_udc_ops = {
+	.get_frame	 = imx_udc_get_frame,
+	.wakeup		 = imx_udc_wakeup,
+	.start		= imx_udc_start,
+	.stop		= imx_udc_stop,
+};
+
+static struct imx_udc_struct controller = {
+	.gadget = {
+		.ops		= &imx_udc_ops,
+		.ep0		= &controller.imx_ep[0].ep,
+		.name		= driver_name,
+		.dev = {
+			.init_name	= "gadget",
+		},
+	},
+
+	.imx_ep[0] = {
+		.ep = {
+			.name		= ep0name,
+			.ops		= &imx_ep_ops,
+			.maxpacket	= 32,
+		},
+		.imx_usb		= &controller,
+		.fifosize		= 32,
+		.bEndpointAddress	= 0,
+		.bmAttributes		= USB_ENDPOINT_XFER_CONTROL,
+	 },
+	.imx_ep[1] = {
+		.ep = {
+			.name		= "ep1in-bulk",
+			.ops		= &imx_ep_ops,
+			.maxpacket	= 64,
+		},
+		.imx_usb		= &controller,
+		.fifosize		= 64,
+		.bEndpointAddress	= USB_DIR_IN | 1,
+		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+	 },
+	.imx_ep[2] = {
+		.ep = {
+			.name		= "ep2out-bulk",
+			.ops		= &imx_ep_ops,
+			.maxpacket	= 64,
+		},
+		.imx_usb		= &controller,
+		.fifosize		= 64,
+		.bEndpointAddress	= USB_DIR_OUT | 2,
+		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+	 },
+	.imx_ep[3] = {
+		.ep = {
+			.name		= "ep3out-bulk",
+			.ops		= &imx_ep_ops,
+			.maxpacket	= 32,
+		},
+		.imx_usb		= &controller,
+		.fifosize		= 32,
+		.bEndpointAddress 	= USB_DIR_OUT | 3,
+		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+	 },
+	.imx_ep[4] = {
+		.ep = {
+			.name		= "ep4in-int",
+			.ops		= &imx_ep_ops,
+			.maxpacket	= 32,
+		 },
+		.imx_usb		= &controller,
+		.fifosize		= 32,
+		.bEndpointAddress 	= USB_DIR_IN | 4,
+		.bmAttributes		= USB_ENDPOINT_XFER_INT,
+	 },
+	.imx_ep[5] = {
+		.ep = {
+			.name		= "ep5out-int",
+			.ops		= &imx_ep_ops,
+			.maxpacket	= 32,
+		},
+		.imx_usb		= &controller,
+		.fifosize		= 32,
+		.bEndpointAddress 	= USB_DIR_OUT | 5,
+		.bmAttributes		= USB_ENDPOINT_XFER_INT,
+	 },
+};
+
+/*******************************************************************************
+ * USB gadget driver functions
+ *******************************************************************************
+ */
+static int imx_udc_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct imx_udc_struct *imx_usb = &controller;
+	int retval;
+
+	if (!driver
+		|| driver->max_speed < USB_SPEED_FULL
+		|| !bind
+		|| !driver->disconnect
+		|| !driver->setup)
+			return -EINVAL;
+	if (!imx_usb)
+		return -ENODEV;
+	if (imx_usb->driver)
+		return -EBUSY;
+
+	/* first hook up the driver ... */
+	imx_usb->driver = driver;
+	imx_usb->gadget.dev.driver = &driver->driver;
+
+	retval = device_add(&imx_usb->gadget.dev);
+	if (retval)
+		goto fail;
+	retval = bind(&imx_usb->gadget);
+	if (retval) {
+		D_ERR(imx_usb->dev, "<%s> bind to driver %s --> error %d\n",
+			__func__, driver->driver.name, retval);
+		device_del(&imx_usb->gadget.dev);
+
+		goto fail;
+	}
+
+	D_INI(imx_usb->dev, "<%s> registered gadget driver '%s'\n",
+		__func__, driver->driver.name);
+
+	imx_udc_enable(imx_usb);
+
+	return 0;
+fail:
+	imx_usb->driver = NULL;
+	imx_usb->gadget.dev.driver = NULL;
+	return retval;
+}
+
+static int imx_udc_stop(struct usb_gadget_driver *driver)
+{
+	struct imx_udc_struct *imx_usb = &controller;
+
+	if (!imx_usb)
+		return -ENODEV;
+	if (!driver || driver != imx_usb->driver || !driver->unbind)
+		return -EINVAL;
+
+	udc_stop_activity(imx_usb, driver);
+	imx_udc_disable(imx_usb);
+	del_timer(&imx_usb->timer);
+
+	driver->unbind(&imx_usb->gadget);
+	imx_usb->gadget.dev.driver = NULL;
+	imx_usb->driver = NULL;
+
+	device_del(&imx_usb->gadget.dev);
+
+	D_INI(imx_usb->dev, "<%s> unregistered gadget driver '%s'\n",
+		__func__, driver->driver.name);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Module functions
+ *******************************************************************************
+ */
+
+static int __init imx_udc_probe(struct platform_device *pdev)
+{
+	struct imx_udc_struct *imx_usb = &controller;
+	struct resource *res;
+	struct imxusb_platform_data *pdata;
+	struct clk *clk;
+	void __iomem *base;
+	int ret = 0;
+	int i;
+	resource_size_t res_size;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "can't get device resources\n");
+		return -ENODEV;
+	}
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "driver needs platform data\n");
+		return -ENODEV;
+	}
+
+	res_size = resource_size(res);
+	if (!request_mem_region(res->start, res_size, res->name)) {
+		dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n",
+			res_size, res->start);
+		return -ENOMEM;
+	}
+
+	if (pdata->init) {
+		ret = pdata->init(&pdev->dev);
+		if (ret)
+			goto fail0;
+	}
+
+	base = ioremap(res->start, res_size);
+	if (!base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		ret = -EIO;
+		goto fail1;
+	}
+
+	clk = clk_get(NULL, "usbd_clk");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev, "can't get USB clock\n");
+		goto fail2;
+	}
+	clk_enable(clk);
+
+	if (clk_get_rate(clk) != 48000000) {
+		D_INI(&pdev->dev,
+			"Bad USB clock (%d Hz), changing to 48000000 Hz\n",
+			(int)clk_get_rate(clk));
+		if (clk_set_rate(clk, 48000000)) {
+			dev_err(&pdev->dev,
+				"Unable to set correct USB clock (48MHz)\n");
+			ret = -EIO;
+			goto fail3;
+		}
+	}
+
+	for (i = 0; i < IMX_USB_NB_EP + 1; i++) {
+		imx_usb->usbd_int[i] = platform_get_irq(pdev, i);
+		if (imx_usb->usbd_int[i] < 0) {
+			dev_err(&pdev->dev, "can't get irq number\n");
+			ret = -ENODEV;
+			goto fail3;
+		}
+	}
+
+	for (i = 0; i < IMX_USB_NB_EP + 1; i++) {
+		ret = request_irq(imx_usb->usbd_int[i], intr_handler(i),
+				     0, driver_name, imx_usb);
+		if (ret) {
+			dev_err(&pdev->dev, "can't get irq %i, err %d\n",
+				imx_usb->usbd_int[i], ret);
+			for (--i; i >= 0; i--)
+				free_irq(imx_usb->usbd_int[i], imx_usb);
+			goto fail3;
+		}
+	}
+
+	imx_usb->res = res;
+	imx_usb->base = base;
+	imx_usb->clk = clk;
+	imx_usb->dev = &pdev->dev;
+
+	device_initialize(&imx_usb->gadget.dev);
+
+	imx_usb->gadget.dev.parent = &pdev->dev;
+	imx_usb->gadget.dev.dma_mask = pdev->dev.dma_mask;
+
+	platform_set_drvdata(pdev, imx_usb);
+
+	usb_init_data(imx_usb);
+	imx_udc_init(imx_usb);
+
+	init_timer(&imx_usb->timer);
+	imx_usb->timer.function = handle_config;
+	imx_usb->timer.data = (unsigned long)imx_usb;
+
+	ret = usb_add_gadget_udc(&pdev->dev, &imx_usb->gadget);
+	if (ret)
+		goto fail4;
+
+	return 0;
+fail4:
+	for (i = 0; i < IMX_USB_NB_EP + 1; i++)
+		free_irq(imx_usb->usbd_int[i], imx_usb);
+fail3:
+	clk_disable(clk);
+	clk_put(clk);
+fail2:
+	iounmap(base);
+fail1:
+	if (pdata->exit)
+		pdata->exit(&pdev->dev);
+fail0:
+	release_mem_region(res->start, res_size);
+	return ret;
+}
+
+static int __exit imx_udc_remove(struct platform_device *pdev)
+{
+	struct imx_udc_struct *imx_usb = platform_get_drvdata(pdev);
+	struct imxusb_platform_data *pdata = pdev->dev.platform_data;
+	int i;
+
+	usb_del_gadget_udc(&imx_usb->gadget);
+	imx_udc_disable(imx_usb);
+	del_timer(&imx_usb->timer);
+
+	for (i = 0; i < IMX_USB_NB_EP + 1; i++)
+		free_irq(imx_usb->usbd_int[i], imx_usb);
+
+	clk_disable(imx_usb->clk);
+	clk_put(imx_usb->clk);
+	iounmap(imx_usb->base);
+
+	release_mem_region(imx_usb->res->start, resource_size(imx_usb->res));
+
+	if (pdata->exit)
+		pdata->exit(&pdev->dev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/*----------------------------------------------------------------------------*/
+
+#ifdef	CONFIG_PM
+#define	imx_udc_suspend	NULL
+#define	imx_udc_resume	NULL
+#else
+#define	imx_udc_suspend	NULL
+#define	imx_udc_resume	NULL
+#endif
+
+/*----------------------------------------------------------------------------*/
+
+static struct platform_driver udc_driver = {
+	.driver		= {
+		.name	= driver_name,
+		.owner	= THIS_MODULE,
+	},
+	.remove		= __exit_p(imx_udc_remove),
+	.suspend	= imx_udc_suspend,
+	.resume		= imx_udc_resume,
+};
+
+static int __init udc_init(void)
+{
+	return platform_driver_probe(&udc_driver, imx_udc_probe);
+}
+module_init(udc_init);
+
+static void __exit udc_exit(void)
+{
+	platform_driver_unregister(&udc_driver);
+}
+module_exit(udc_exit);
+
+MODULE_DESCRIPTION("IMX USB Device Controller driver");
+MODULE_AUTHOR("Darius Augulis <augulis.darius@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx_udc");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/imx_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/imx_udc.h
new file mode 100644
index 0000000..d118fb7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/imx_udc.h
@@ -0,0 +1,351 @@
+/*
+ *	Copyright (C) 2005 Mike Lee(eemike@gmail.com)
+ *
+ *	This UDC driver is still under test; the code is based on pxa2xx_udc.h.
+ *	Use it at your own risk!
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ */
+
+#ifndef __LINUX_USB_GADGET_IMX_H
+#define __LINUX_USB_GADGET_IMX_H
+
+#include <linux/types.h>
+
+/* Helper macros */
+#define EP_NO(ep)	((ep->bEndpointAddress) & ~USB_DIR_IN) /* endpoint number */
+#define EP_DIR(ep)	((ep->bEndpointAddress) & USB_DIR_IN ? 1 : 0) /* IN:1, OUT:0 */
+#define IMX_USB_NB_EP	6
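+/* Example (illustrative): bEndpointAddress 0x82 (EP2 IN) gives EP_NO() == 2
+ * and EP_DIR() == 1; 0x01 (EP1 OUT) gives EP_NO() == 1 and EP_DIR() == 0.
+ */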
+
+/* Driver structures */
+struct imx_request {
+	struct usb_request			req;
+	struct list_head			queue;
+	unsigned int				in_use;
+};
+
+enum ep0_state {
+	EP0_IDLE,
+	EP0_IN_DATA_PHASE,
+	EP0_OUT_DATA_PHASE,
+	EP0_CONFIG,
+	EP0_STALL,
+};
+
+struct imx_ep_struct {
+	struct usb_ep				ep;
+	struct imx_udc_struct			*imx_usb;
+	struct list_head			queue;
+	unsigned char				stopped;
+	unsigned char				fifosize;
+	unsigned char				bEndpointAddress;
+	unsigned char				bmAttributes;
+};
+
+struct imx_udc_struct {
+	struct usb_gadget			gadget;
+	struct usb_gadget_driver		*driver;
+	struct device				*dev;
+	struct imx_ep_struct			imx_ep[IMX_USB_NB_EP];
+	struct clk				*clk;
+	struct timer_list			timer;
+	enum ep0_state				ep0state;
+	struct resource				*res;
+	void __iomem				*base;
+	unsigned char				set_config;
+	int					cfg,
+						intf,
+						alt,
+						usbd_int[7];
+};
+
+/* USB registers */
+#define  USB_FRAME		(0x00)	/* USB frame */
+#define  USB_SPEC		(0x04)	/* USB Spec */
+#define  USB_STAT		(0x08)	/* USB Status */
+#define  USB_CTRL		(0x0C)	/* USB Control */
+#define  USB_DADR		(0x10)	/* USB Desc RAM addr */
+#define  USB_DDAT		(0x14)	/* USB Desc RAM/EP buffer data */
+#define  USB_INTR		(0x18)	/* USB interrupt */
+#define  USB_MASK		(0x1C)	/* USB Mask */
+#define  USB_ENAB		(0x24)	/* USB Enable */
+#define  USB_EP_STAT(x)		(0x30 + (x*0x30)) /* USB status/control */
+#define  USB_EP_INTR(x)		(0x34 + (x*0x30)) /* USB interrupt */
+#define  USB_EP_MASK(x)		(0x38 + (x*0x30)) /* USB mask */
+#define  USB_EP_FDAT(x)		(0x3C + (x*0x30)) /* USB FIFO data */
+#define  USB_EP_FDAT0(x)	(0x3C + (x*0x30)) /* USB FIFO data */
+#define  USB_EP_FDAT1(x)	(0x3D + (x*0x30)) /* USB FIFO data */
+#define  USB_EP_FDAT2(x)	(0x3E + (x*0x30)) /* USB FIFO data */
+#define  USB_EP_FDAT3(x)	(0x3F + (x*0x30)) /* USB FIFO data */
+#define  USB_EP_FSTAT(x)	(0x40 + (x*0x30)) /* USB FIFO status */
+#define  USB_EP_FCTRL(x)	(0x44 + (x*0x30)) /* USB FIFO control */
+#define  USB_EP_LRFP(x)		(0x48 + (x*0x30)) /* USB last rd f. pointer */
+#define  USB_EP_LWFP(x)		(0x4C + (x*0x30)) /* USB last wr f. pointer */
+#define  USB_EP_FALRM(x)	(0x50 + (x*0x30)) /* USB FIFO alarm */
+#define  USB_EP_FRDP(x)		(0x54 + (x*0x30)) /* USB FIFO read pointer */
+#define  USB_EP_FWRP(x)		(0x58 + (x*0x30)) /* USB FIFO write pointer */
+/* USB Control Register Bit Fields.*/
+#define CTRL_CMDOVER		(1<<6)	/* UDC status */
+#define CTRL_CMDERROR		(1<<5)	/* UDC status */
+#define CTRL_FE_ENA		(1<<3)	/* Enable Front End logic */
+#define CTRL_UDC_RST		(1<<2)	/* UDC reset */
+#define CTRL_AFE_ENA		(1<<1)	/* Analog Front End enable */
+#define CTRL_RESUME		(1<<0)	/* UDC resume */
+/* USB Status Register Bit Fields.*/
+#define STAT_RST		(1<<8)
+#define STAT_SUSP		(1<<7)
+#define STAT_CFG		(3<<5)
+#define STAT_INTF		(3<<3)
+#define STAT_ALTSET		(7<<0)
+/* USB Interrupt Status/Mask Registers Bit fields */
+#define INTR_WAKEUP		(1<<31)	/* Wake up Interrupt */
+#define INTR_MSOF		(1<<7)	/* Missed Start of Frame */
+#define INTR_SOF		(1<<6)	/* Start of Frame */
+#define INTR_RESET_STOP		(1<<5)	/* Reset Signaling stop */
+#define INTR_RESET_START	(1<<4)	/* Reset Signaling start */
+#define INTR_RESUME		(1<<3)	/* Suspend to resume */
+#define INTR_SUSPEND		(1<<2)	/* Active to suspend */
+#define INTR_FRAME_MATCH	(1<<1)	/* Frame matched */
+#define INTR_CFG_CHG		(1<<0)	/* Configuration change occurred */
+/* USB Enable Register Bit Fields.*/
+#define ENAB_RST		(1<<31)	/* Reset USB modules */
+#define ENAB_ENAB		(1<<30)	/* Enable USB modules*/
+#define ENAB_SUSPEND		(1<<29)	/* Suspend USB modules */
+#define ENAB_ENDIAN		(1<<28)	/* Endian of USB modules */
+#define ENAB_PWRMD		(1<<0)	/* Power mode of USB modules */
+/* USB Descriptor Ram Address Register bit fields */
+#define DADR_CFG		(1<<31)	/* Configuration */
+#define DADR_BSY		(1<<30)	/* Busy status */
+#define DADR_DADR		(0x1FF)	/* Descriptor Ram Address */
+/* USB Descriptor RAM/Endpoint Buffer Data Register bit fields */
+#define DDAT_DDAT		(0xFF)	/* Descriptor Endpoint Buffer */
+/* USB Endpoint Status Register bit fields */
+#define EPSTAT_BCOUNT		(0x7F<<16)	/* Endpoint FIFO byte count */
+#define EPSTAT_SIP		(1<<8)	/* Endpoint setup in progress */
+#define EPSTAT_DIR		(1<<7)	/* Endpoint transfer direction */
+#define EPSTAT_MAX		(3<<5)	/* Endpoint Max packet size */
+#define EPSTAT_TYP		(3<<3)	/* Endpoint type */
+#define EPSTAT_ZLPS		(1<<2)	/* Send zero length packet */
+#define EPSTAT_FLUSH		(1<<1)	/* Endpoint FIFO Flush */
+#define EPSTAT_STALL		(1<<0)	/* Force stall */
+/* USB Endpoint FIFO Status Register bit fields */
+#define FSTAT_FRAME_STAT	(0xF<<24)	/* Frame status bit [0-3] */
+#define FSTAT_ERR		(1<<22)	/* FIFO error */
+#define FSTAT_UF		(1<<21)	/* FIFO underflow */
+#define FSTAT_OF		(1<<20)	/* FIFO overflow */
+#define FSTAT_FR		(1<<19)	/* FIFO frame ready */
+#define FSTAT_FULL		(1<<18)	/* FIFO full */
+#define FSTAT_ALRM		(1<<17)	/* FIFO alarm */
+#define FSTAT_EMPTY		(1<<16)	/* FIFO empty */
+/* USB Endpoint FIFO Control Register bit fields */
+#define FCTRL_WFR		(1<<29)	/* Write frame end */
+/* USB Endpoint Interrupt Status Register bit fields */
+#define EPINTR_FIFO_FULL	(1<<8)	/* fifo full */
+#define EPINTR_FIFO_EMPTY	(1<<7)	/* fifo empty */
+#define EPINTR_FIFO_ERROR	(1<<6)	/* fifo error */
+#define EPINTR_FIFO_HIGH	(1<<5)	/* fifo high */
+#define EPINTR_FIFO_LOW		(1<<4)	/* fifo low */
+#define EPINTR_MDEVREQ		(1<<3)	/* multi Device request */
+#define EPINTR_EOT		(1<<2)	/* fifo end of transfer */
+#define EPINTR_DEVREQ		(1<<1)	/* Device request */
+#define EPINTR_EOF		(1<<0)	/* fifo end of frame */
+
+/* Debug macros */
+#ifdef DEBUG
+
+/* #define DEBUG_REQ */
+/* #define DEBUG_TRX */
+/* #define DEBUG_INIT */
+/* #define DEBUG_EP0 */
+/* #define DEBUG_EPX */
+/* #define DEBUG_IRQ */
+/* #define DEBUG_EPIRQ */
+/* #define DEBUG_DUMP */
+/* #define DEBUG_ERR */
+
+#ifdef DEBUG_REQ
+	#define D_REQ(dev, args...)	dev_dbg(dev, ## args)
+#else
+	#define D_REQ(dev, args...)	do {} while (0)
+#endif /* DEBUG_REQ */
+
+#ifdef DEBUG_TRX
+	#define D_TRX(dev, args...)	dev_dbg(dev, ## args)
+#else
+	#define D_TRX(dev, args...)	do {} while (0)
+#endif /* DEBUG_TRX */
+
+#ifdef DEBUG_INIT
+	#define D_INI(dev, args...)	dev_dbg(dev, ## args)
+#else
+	#define D_INI(dev, args...)	do {} while (0)
+#endif /* DEBUG_INIT */
+
+#ifdef DEBUG_EP0
+	static const char *state_name[] = {
+		"EP0_IDLE",
+		"EP0_IN_DATA_PHASE",
+		"EP0_OUT_DATA_PHASE",
+		"EP0_CONFIG",
+		"EP0_STALL"
+	};
+	#define D_EP0(dev, args...)	dev_dbg(dev, ## args)
+#else
+	#define D_EP0(dev, args...)	do {} while (0)
+#endif /* DEBUG_EP0 */
+
+#ifdef DEBUG_EPX
+	#define D_EPX(dev, args...)	dev_dbg(dev, ## args)
+#else
+	#define D_EPX(dev, args...)	do {} while (0)
+#endif /* DEBUG_EPX */
+
+#ifdef DEBUG_IRQ
+	static void dump_intr(const char *label, int irqreg, struct device *dev)
+	{
+		dev_dbg(dev, "<%s> USB_INTR=[%s%s%s%s%s%s%s%s%s]\n", label,
+			(irqreg & INTR_WAKEUP) ? " wake" : "",
+			(irqreg & INTR_MSOF) ? " msof" : "",
+			(irqreg & INTR_SOF) ? " sof" : "",
+			(irqreg & INTR_RESUME) ? " resume" : "",
+			(irqreg & INTR_SUSPEND) ? " suspend" : "",
+			(irqreg & INTR_RESET_STOP) ? " noreset" : "",
+			(irqreg & INTR_RESET_START) ? " reset" : "",
+			(irqreg & INTR_FRAME_MATCH) ? " fmatch" : "",
+			(irqreg & INTR_CFG_CHG) ? " config" : "");
+	}
+#else
+	#define dump_intr(x, y, z)		do {} while (0)
+#endif /* DEBUG_IRQ */
+
+#ifdef DEBUG_EPIRQ
+	static void dump_ep_intr(const char *label, int nr, int irqreg,
+							struct device *dev)
+	{
+		dev_dbg(dev, "<%s> EP%d_INTR=[%s%s%s%s%s%s%s%s%s]\n", label, nr,
+			(irqreg & EPINTR_FIFO_FULL) ? " full" : "",
+			(irqreg & EPINTR_FIFO_EMPTY) ? " fempty" : "",
+			(irqreg & EPINTR_FIFO_ERROR) ? " ferr" : "",
+			(irqreg & EPINTR_FIFO_HIGH) ? " fhigh" : "",
+			(irqreg & EPINTR_FIFO_LOW) ? " flow" : "",
+			(irqreg & EPINTR_MDEVREQ) ? " mreq" : "",
+			(irqreg & EPINTR_EOF) ? " eof" : "",
+			(irqreg & EPINTR_DEVREQ) ? " devreq" : "",
+			(irqreg & EPINTR_EOT) ? " eot" : "");
+	}
+#else
+	#define dump_ep_intr(x, y, z, i)	do {} while (0)
+#endif /* DEBUG_EPIRQ */
+
+#ifdef DEBUG_DUMP
+	static void dump_usb_stat(const char *label,
+						struct imx_udc_struct *imx_usb)
+	{
+		int temp = __raw_readl(imx_usb->base + USB_STAT);
+
+		dev_dbg(imx_usb->dev,
+			"<%s> USB_STAT=[%s%s CFG=%d, INTF=%d, ALTR=%d]\n", label,
+			(temp & STAT_RST) ? " reset" : "",
+			(temp & STAT_SUSP) ? " suspend" : "",
+			(temp & STAT_CFG) >> 5,
+			(temp & STAT_INTF) >> 3,
+			(temp & STAT_ALTSET));
+	}
+
+	static void dump_ep_stat(const char *label,
+						struct imx_ep_struct *imx_ep)
+	{
+		int temp = __raw_readl(imx_ep->imx_usb->base
+						+ USB_EP_INTR(EP_NO(imx_ep)));
+
+		dev_dbg(imx_ep->imx_usb->dev,
+			"<%s> EP%d_INTR=[%s%s%s%s%s%s%s%s%s]\n",
+			label, EP_NO(imx_ep),
+			(temp & EPINTR_FIFO_FULL) ? " full" : "",
+			(temp & EPINTR_FIFO_EMPTY) ? " fempty" : "",
+			(temp & EPINTR_FIFO_ERROR) ? " ferr" : "",
+			(temp & EPINTR_FIFO_HIGH) ? " fhigh" : "",
+			(temp & EPINTR_FIFO_LOW) ? " flow" : "",
+			(temp & EPINTR_MDEVREQ) ? " mreq" : "",
+			(temp & EPINTR_EOF) ? " eof" : "",
+			(temp & EPINTR_DEVREQ) ? " devreq" : "",
+			(temp & EPINTR_EOT) ? " eot" : "");
+
+		temp = __raw_readl(imx_ep->imx_usb->base
+						+ USB_EP_STAT(EP_NO(imx_ep)));
+
+		dev_dbg(imx_ep->imx_usb->dev,
+			"<%s> EP%d_STAT=[%s%s bcount=%d]\n",
+			label, EP_NO(imx_ep),
+			(temp & EPSTAT_SIP) ? " sip" : "",
+			(temp & EPSTAT_STALL) ? " stall" : "",
+			(temp & EPSTAT_BCOUNT) >> 16);
+
+		temp = __raw_readl(imx_ep->imx_usb->base
+						+ USB_EP_FSTAT(EP_NO(imx_ep)));
+
+		dev_dbg(imx_ep->imx_usb->dev,
+			"<%s> EP%d_FSTAT=[%s%s%s%s%s%s%s]\n",
+			label, EP_NO(imx_ep),
+			(temp & FSTAT_ERR) ? " ferr" : "",
+			(temp & FSTAT_UF) ? " funder" : "",
+			(temp & FSTAT_OF) ? " fover" : "",
+			(temp & FSTAT_FR) ? " fready" : "",
+			(temp & FSTAT_FULL) ? " ffull" : "",
+			(temp & FSTAT_ALRM) ? " falarm" : "",
+			(temp & FSTAT_EMPTY) ? " fempty" : "");
+	}
+
+	static void dump_req(const char *label, struct imx_ep_struct *imx_ep,
+							struct usb_request *req)
+	{
+		int i;
+
+		if (!req || !req->buf) {
+			dev_dbg(imx_ep->imx_usb->dev,
+					"<%s> req or req buf is free\n", label);
+			return;
+		}
+
+		if ((!EP_NO(imx_ep) && imx_ep->imx_usb->ep0state
+			== EP0_IN_DATA_PHASE)
+			|| (EP_NO(imx_ep) && EP_DIR(imx_ep))) {
+
+			dev_dbg(imx_ep->imx_usb->dev,
+						"<%s> request dump <", label);
+			for (i = 0; i < req->length; i++)
+				printk("%02x-", *((u8 *)req->buf + i));
+			printk(">\n");
+		}
+	}
+
+#else
+	#define dump_ep_stat(x, y)		do {} while (0)
+	#define dump_usb_stat(x, y)		do {} while (0)
+	#define dump_req(x, y, z)		do {} while (0)
+#endif /* DEBUG_DUMP */
+
+#ifdef DEBUG_ERR
+	#define D_ERR(dev, args...)	dev_dbg(dev, ## args)
+#else
+	#define D_ERR(dev, args...)	do {} while (0)
+#endif
+
+#else
+	#define D_REQ(dev, args...)		do {} while (0)
+	#define D_TRX(dev, args...)		do {} while (0)
+	#define D_INI(dev, args...)		do {} while (0)
+	#define D_EP0(dev, args...)		do {} while (0)
+	#define D_EPX(dev, args...)		do {} while (0)
+	#define dump_ep_intr(x, y, z, i)	do {} while (0)
+	#define dump_intr(x, y, z)		do {} while (0)
+	#define dump_ep_stat(x, y)		do {} while (0)
+	#define dump_usb_stat(x, y)		do {} while (0)
+	#define dump_req(x, y, z)		do {} while (0)
+	#define D_ERR(dev, args...)		do {} while (0)
+#endif /* DEBUG */
+
+#endif /* __LINUX_USB_GADGET_IMX_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/inode.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/inode.c
new file mode 100644
index 0000000..093e907
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/inode.c
@@ -0,0 +1,2146 @@
+/*
+ * inode.c -- user mode filesystem api for usb gadget controllers
+ *
+ * Copyright (C) 2003-2004 David Brownell
+ * Copyright (C) 2003 Agilent Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/uts.h>
+#include <linux/wait.h>
+#include <linux/compiler.h>
+#include <asm/uaccess.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+
+#include <linux/usb/gadgetfs.h>
+#include <linux/usb/gadget.h>
+
+
+/*
+ * The gadgetfs API maps each endpoint to a file descriptor so that you
+ * can use standard synchronous read/write calls for I/O.  There's some
+ * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
+ * drivers show how this works in practice.  You can also use AIO to
+ * eliminate I/O gaps between requests, to help when streaming data.
+ *
+ * Key parts that must be USB-specific are protocols defining how the
+ * read/write operations relate to the hardware state machines.  There
+ * are two types of files.  One type is for the device, implementing ep0.
+ * The other type is for each IN or OUT endpoint.  In both cases, the
+ * user mode driver must configure the hardware before using it.
+ *
+ * - First, dev_config() is called when /dev/gadget/$CHIP is configured
+ *   (by writing configuration and device descriptors).  Afterwards it
+ *   may serve as a source of device events, used to handle all control
+ *   requests other than basic enumeration.
+ *
+ * - Then, after a SET_CONFIGURATION control request, ep_config() is
+ *   called when each /dev/gadget/ep* file is configured (by writing
+ *   endpoint descriptors).  Afterwards these files are used to write()
+ *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
+ *   direction" request is issued (like reading an IN endpoint).
+ *
+ * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
+ * not possible on all hardware.  For example, precise fault handling with
+ * respect to data left in endpoint fifos after aborted operations; or
+ * selective clearing of endpoint halts, to implement SET_INTERFACE.
+ */
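+/*
+ * A minimal user-space sketch of that flow (illustrative only: the
+ * descriptor buffers and their lengths are assumptions, not anything
+ * defined in this file):
+ *
+ *	int ep0 = open ("/dev/gadget/$CHIP", O_RDWR);
+ *	write (ep0, dev_config_buf, dev_config_len);	// tag + config(s) + device
+ *	// ... then read usb_gadgetfs_event records from ep0 ...
+ *
+ *	// after the host's SET_CONFIGURATION:
+ *	int in = open ("/dev/gadget/$ENDPOINT", O_RDWR);
+ *	write (in, ep_desc_buf, ep_desc_len);		// tag + endpoint descriptor(s)
+ *	write (in, data, data_len);			// one IN transfer to the host
+ */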
+
+#define	DRIVER_DESC	"USB Gadget filesystem"
+#define	DRIVER_VERSION	"24 Aug 2004"
+
+static const char driver_desc [] = DRIVER_DESC;
+static const char shortname [] = "gadgetfs";
+
+MODULE_DESCRIPTION (DRIVER_DESC);
+MODULE_AUTHOR ("David Brownell");
+MODULE_LICENSE ("GPL");
+
+
+/*----------------------------------------------------------------------*/
+
+#define GADGETFS_MAGIC		0xaee71ee7
+#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+/* /dev/gadget/$CHIP represents ep0 and the whole device */
+enum ep0_state {
+	/* DISABLED is the initial state. */
+	STATE_DEV_DISABLED = 0,
+
+	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
+	 * ep0/device i/o modes and binding to the controller.  Driver
+	 * must always write descriptors to initialize the device, then
+	 * the device becomes UNCONNECTED until enumeration.
+	 */
+	STATE_DEV_OPENED,
+
+	/* From then on, ep0 fd is in either of two basic modes:
+	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
+	 * - SETUP: read/write will transfer control data and succeed;
+	 *   or if "wrong direction", performs protocol stall
+	 */
+	STATE_DEV_UNCONNECTED,
+	STATE_DEV_CONNECTED,
+	STATE_DEV_SETUP,
+
+	/* UNBOUND means the driver closed ep0, so the device won't be
+	 * accessible again (DEV_DISABLED) until all fds are closed.
+	 */
+	STATE_DEV_UNBOUND,
+};
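+/* Typical lifecycle: DISABLED -> OPENED (open of $CHIP) -> UNCONNECTED
+ * (descriptors written) -> CONNECTED/SETUP while enumerated and handling
+ * control traffic -> UNBOUND once ep0 is closed -> DISABLED again after
+ * the last fd goes away.
+ */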
+
+/* enough for the whole queue: most events invalidate others */
+#define	N_EVENT			5
+
+struct dev_data {
+	spinlock_t			lock;
+	atomic_t			count;
+	enum ep0_state			state;		/* P: lock */
+	struct usb_gadgetfs_event	event [N_EVENT];
+	unsigned			ev_next;
+	struct fasync_struct		*fasync;
+	u8				current_config;
+
+	/* drivers reading ep0 MUST handle control requests (SETUP)
+	 * reported that way; else the host will time out.
+	 */
+	unsigned			usermode_setup : 1,
+					setup_in : 1,
+					setup_can_stall : 1,
+					setup_out_ready : 1,
+					setup_out_error : 1,
+					setup_abort : 1;
+	unsigned			setup_wLength;
+
+	/* the rest is basically write-once */
+	struct usb_config_descriptor	*config, *hs_config;
+	struct usb_device_descriptor	*dev;
+	struct usb_request		*req;
+	struct usb_gadget		*gadget;
+	struct list_head		epfiles;
+	void				*buf;
+	wait_queue_head_t		wait;
+	struct super_block		*sb;
+	struct dentry			*dentry;
+
+	/* except this scratch i/o buffer for ep0 */
+	u8				rbuf [256];
+};
+
+static inline void get_dev (struct dev_data *data)
+{
+	atomic_inc (&data->count);
+}
+
+static void put_dev (struct dev_data *data)
+{
+	if (likely (!atomic_dec_and_test (&data->count)))
+		return;
+	/* needs no more cleanup */
+	BUG_ON (waitqueue_active (&data->wait));
+	kfree (data);
+}
+
+static struct dev_data *dev_new (void)
+{
+	struct dev_data		*dev;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+	dev->state = STATE_DEV_DISABLED;
+	atomic_set (&dev->count, 1);
+	spin_lock_init (&dev->lock);
+	INIT_LIST_HEAD (&dev->epfiles);
+	init_waitqueue_head (&dev->wait);
+	return dev;
+}
+
+/*----------------------------------------------------------------------*/
+
+/* other /dev/gadget/$ENDPOINT files represent endpoints */
+enum ep_state {
+	STATE_EP_DISABLED = 0,
+	STATE_EP_READY,
+	STATE_EP_ENABLED,
+	STATE_EP_UNBOUND,
+};
+
+struct ep_data {
+	struct mutex			lock;
+	enum ep_state			state;
+	atomic_t			count;
+	struct dev_data			*dev;
+	/* must hold dev->lock before accessing ep or req */
+	struct usb_ep			*ep;
+	struct usb_request		*req;
+	ssize_t				status;
+	char				name [16];
+	struct usb_endpoint_descriptor	desc, hs_desc;
+	struct list_head		epfiles;
+	wait_queue_head_t		wait;
+	struct dentry			*dentry;
+	struct inode			*inode;
+};
+
+static inline void get_ep (struct ep_data *data)
+{
+	atomic_inc (&data->count);
+}
+
+static void put_ep (struct ep_data *data)
+{
+	if (likely (!atomic_dec_and_test (&data->count)))
+		return;
+	put_dev (data->dev);
+	/* needs no more cleanup */
+	BUG_ON (!list_empty (&data->epfiles));
+	BUG_ON (waitqueue_active (&data->wait));
+	kfree (data);
+}
+
+/*----------------------------------------------------------------------*/
+
+/* most "how to use the hardware" policy choices are in userspace:
+ * mapping endpoint roles (which the driver needs) to the capabilities
+ * which the usb controller has.  most of those capabilities are exposed
+ * implicitly, starting with the driver name and then endpoint names.
+ */
+
+static const char *CHIP;
+
+/*----------------------------------------------------------------------*/
+
+/* NOTE:  don't use dev_printk calls before binding to the gadget
+ * at the end of ep0 configuration, or after unbind.
+ */
+
+/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
+#define xprintk(d,level,fmt,args...) \
+	printk(level "%s: " fmt , shortname , ## args)
+
+#ifdef DEBUG
+#define DBG(dev,fmt,args...) \
+	xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DBG(dev,fmt,args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VDEBUG	DBG
+#else
+#define VDEBUG(dev,fmt,args...) \
+	do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define ERROR(dev,fmt,args...) \
+	xprintk(dev , KERN_ERR , fmt , ## args)
+#define INFO(dev,fmt,args...) \
+	xprintk(dev , KERN_INFO , fmt , ## args)
+
+
+/*----------------------------------------------------------------------*/
+
+/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
+ *
+ * After opening, configure non-control endpoints.  Then use normal
+ * stream read() and write() requests; and maybe ioctl() to get more
+ * precise FIFO status when recovering from cancellation.
+ */
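+/*
+ * For example (sketch; ep_fd is an already-configured OUT endpoint and
+ * error handling is omitted):
+ *
+ *	char buf [512];
+ *	ssize_t n = read (ep_fd, buf, sizeof buf);	// one OUT transfer
+ *
+ *	if (n < 0 && errno == EINTR)
+ *		// how much data was left stranded in the fifo?
+ *		ioctl (ep_fd, GADGETFS_FIFO_STATUS);
+ */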
+
+static void epio_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	struct ep_data	*epdata = ep->driver_data;
+
+	if (!req->context)
+		return;
+	if (req->status)
+		epdata->status = req->status;
+	else
+		epdata->status = req->actual;
+	complete ((struct completion *)req->context);
+}
+
+/* take the endpoint mutex, returning 0 once the endpoint is enabled.
+ * still need dev->lock to use epdata->ep.
+ */
+static int
+get_ready_ep (unsigned f_flags, struct ep_data *epdata)
+{
+	int	val;
+
+	if (f_flags & O_NONBLOCK) {
+		if (!mutex_trylock(&epdata->lock))
+			goto nonblock;
+		if (epdata->state != STATE_EP_ENABLED) {
+			mutex_unlock(&epdata->lock);
+nonblock:
+			val = -EAGAIN;
+		} else
+			val = 0;
+		return val;
+	}
+
+	val = mutex_lock_interruptible(&epdata->lock);
+	if (val < 0)
+		return val;
+
+	switch (epdata->state) {
+	case STATE_EP_ENABLED:
+		break;
+	// case STATE_EP_DISABLED:		/* "can't happen" */
+	// case STATE_EP_READY:			/* "can't happen" */
+	default:				/* error! */
+		pr_debug ("%s: ep %p not available, state %d\n",
+				shortname, epdata, epdata->state);
+		// FALLTHROUGH
+	case STATE_EP_UNBOUND:			/* clean disconnect */
+		val = -ENODEV;
+		mutex_unlock(&epdata->lock);
+	}
+	return val;
+}
+
+static ssize_t
+ep_io (struct ep_data *epdata, void *buf, unsigned len)
+{
+	DECLARE_COMPLETION_ONSTACK (done);
+	int value;
+
+	spin_lock_irq (&epdata->dev->lock);
+	if (likely (epdata->ep != NULL)) {
+		struct usb_request	*req = epdata->req;
+
+		req->context = &done;
+		req->complete = epio_complete;
+		req->buf = buf;
+		req->length = len;
+		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
+	} else
+		value = -ENODEV;
+	spin_unlock_irq (&epdata->dev->lock);
+
+	if (likely (value == 0)) {
+		value = wait_event_interruptible (done.wait, done.done);
+		if (value != 0) {
+			spin_lock_irq (&epdata->dev->lock);
+			if (likely (epdata->ep != NULL)) {
+				DBG (epdata->dev, "%s i/o interrupted\n",
+						epdata->name);
+				usb_ep_dequeue (epdata->ep, epdata->req);
+				spin_unlock_irq (&epdata->dev->lock);
+
+				wait_event (done.wait, done.done);
+				if (epdata->status == -ECONNRESET)
+					epdata->status = -EINTR;
+			} else {
+				spin_unlock_irq (&epdata->dev->lock);
+
+				DBG (epdata->dev, "endpoint gone\n");
+				epdata->status = -ENODEV;
+			}
+		}
+		return epdata->status;
+	}
+	return value;
+}
+
+
+/* handle a synchronous OUT bulk/intr/iso transfer */
+static ssize_t
+ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+{
+	struct ep_data		*data = fd->private_data;
+	void			*kbuf;
+	ssize_t			value;
+
+	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
+		return value;
+
+	/* halt any endpoint by doing a "wrong direction" i/o call */
+	if (usb_endpoint_dir_in(&data->desc)) {
+		if (usb_endpoint_xfer_isoc(&data->desc)) {
+			mutex_unlock(&data->lock);
+			return -EINVAL;
+		}
+		DBG (data->dev, "%s halt\n", data->name);
+		spin_lock_irq (&data->dev->lock);
+		if (likely (data->ep != NULL))
+			usb_ep_set_halt (data->ep);
+		spin_unlock_irq (&data->dev->lock);
+		mutex_unlock(&data->lock);
+		return -EBADMSG;
+	}
+
+	/* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
+
+	value = -ENOMEM;
+	kbuf = kmalloc (len, GFP_KERNEL);
+	if (unlikely (!kbuf))
+		goto free1;
+
+	value = ep_io (data, kbuf, len);
+	VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
+		data->name, len, (int) value);
+	if (value >= 0 && copy_to_user (buf, kbuf, value))
+		value = -EFAULT;
+
+free1:
+	mutex_unlock(&data->lock);
+	kfree (kbuf);
+	return value;
+}
+
+/* handle a synchronous IN bulk/intr/iso transfer */
+static ssize_t
+ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+{
+	struct ep_data		*data = fd->private_data;
+	void			*kbuf;
+	ssize_t			value;
+
+	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
+		return value;
+
+	/* halt any endpoint by doing a "wrong direction" i/o call */
+	if (!usb_endpoint_dir_in(&data->desc)) {
+		if (usb_endpoint_xfer_isoc(&data->desc)) {
+			mutex_unlock(&data->lock);
+			return -EINVAL;
+		}
+		DBG (data->dev, "%s halt\n", data->name);
+		spin_lock_irq (&data->dev->lock);
+		if (likely (data->ep != NULL))
+			usb_ep_set_halt (data->ep);
+		spin_unlock_irq (&data->dev->lock);
+		mutex_unlock(&data->lock);
+		return -EBADMSG;
+	}
+
+	/* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
+
+	value = -ENOMEM;
+	kbuf = kmalloc (len, GFP_KERNEL);
+	if (!kbuf)
+		goto free1;
+	if (copy_from_user (kbuf, buf, len)) {
+		value = -EFAULT;
+		goto free1;
+	}
+
+	value = ep_io (data, kbuf, len);
+	VDEBUG (data->dev, "%s write %zu IN, status %d\n",
+		data->name, len, (int) value);
+free1:
+	mutex_unlock(&data->lock);
+	kfree (kbuf);
+	return value;
+}
+
+static int
+ep_release (struct inode *inode, struct file *fd)
+{
+	struct ep_data		*data = fd->private_data;
+	int value;
+
+	value = mutex_lock_interruptible(&data->lock);
+	if (value < 0)
+		return value;
+
+	/* clean up if this can be reopened */
+	if (data->state != STATE_EP_UNBOUND) {
+		data->state = STATE_EP_DISABLED;
+		data->desc.bDescriptorType = 0;
+		data->hs_desc.bDescriptorType = 0;
+		usb_ep_disable(data->ep);
+	}
+	mutex_unlock(&data->lock);
+	put_ep (data);
+	return 0;
+}
+
+static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
+{
+	struct ep_data		*data = fd->private_data;
+	int			status;
+
+	if ((status = get_ready_ep (fd->f_flags, data)) < 0)
+		return status;
+
+	spin_lock_irq (&data->dev->lock);
+	if (likely (data->ep != NULL)) {
+		switch (code) {
+		case GADGETFS_FIFO_STATUS:
+			status = usb_ep_fifo_status (data->ep);
+			break;
+		case GADGETFS_FIFO_FLUSH:
+			usb_ep_fifo_flush (data->ep);
+			break;
+		case GADGETFS_CLEAR_HALT:
+			status = usb_ep_clear_halt (data->ep);
+			break;
+		default:
+			status = -ENOTTY;
+		}
+	} else
+		status = -ENODEV;
+	spin_unlock_irq (&data->dev->lock);
+	mutex_unlock(&data->lock);
+	return status;
+}
+
+/*----------------------------------------------------------------------*/
+
+/* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
+
+struct kiocb_priv {
+	struct usb_request	*req;
+	struct ep_data		*epdata;
+	void			*buf;
+	const struct iovec	*iv;
+	unsigned long		nr_segs;
+	unsigned		actual;
+};
+
+static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
+{
+	struct kiocb_priv	*priv = iocb->private;
+	struct ep_data		*epdata;
+	int			value;
+
+	local_irq_disable();
+	epdata = priv->epdata;
+	// spin_lock(&epdata->dev->lock);
+	kiocbSetCancelled(iocb);
+	if (likely(epdata && epdata->ep && priv->req))
+		value = usb_ep_dequeue (epdata->ep, priv->req);
+	else
+		value = -EINVAL;
+	// spin_unlock(&epdata->dev->lock);
+	local_irq_enable();
+
+	aio_put_req(iocb);
+	return value;
+}
+
+static ssize_t ep_aio_read_retry(struct kiocb *iocb)
+{
+	struct kiocb_priv	*priv = iocb->private;
+	ssize_t			len, total;
+	void			*to_copy;
+	int			i;
+
+	/* we "retry" to get the right mm context for this: */
+
+	/* copy stuff into user buffers */
+	total = priv->actual;
+	len = 0;
+	to_copy = priv->buf;
+	for (i=0; i < priv->nr_segs; i++) {
+		ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
+
+		if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
+			if (len == 0)
+				len = -EFAULT;
+			break;
+		}
+
+		total -= this;
+		len += this;
+		to_copy += this;
+		if (total == 0)
+			break;
+	}
+	kfree(priv->buf);
+	kfree(priv->iv);
+	kfree(priv);
+	return len;
+}
+
+static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct kiocb		*iocb = req->context;
+	struct kiocb_priv	*priv = iocb->private;
+	struct ep_data		*epdata = priv->epdata;
+
+	/* lock against disconnect (and ideally, cancel) */
+	spin_lock(&epdata->dev->lock);
+	priv->req = NULL;
+	priv->epdata = NULL;
+
+	/* if this was a write or a read returning no data then we
+	 * don't need to copy anything to userspace, so we can
+	 * complete the aio request immediately.
+	 */
+	if (priv->iv == NULL || unlikely(req->actual == 0)) {
+		kfree(req->buf);
+		kfree(priv->iv);
+		kfree(priv);
+		iocb->private = NULL;
+		/* aio_complete() reports bytes-transferred _and_ faults */
+		aio_complete(iocb, req->actual ? req->actual : req->status,
+				req->status);
+	} else {
+		/* retry() won't report both; so we hide some faults */
+		if (unlikely(0 != req->status))
+			DBG(epdata->dev, "%s fault %d len %d\n",
+				ep->name, req->status, req->actual);
+
+		priv->buf = req->buf;
+		priv->actual = req->actual;
+		kick_iocb(iocb);
+	}
+	spin_unlock(&epdata->dev->lock);
+
+	usb_ep_free_request(ep, req);
+	put_ep(epdata);
+}
+
+static ssize_t
+ep_aio_rwtail(
+	struct kiocb	*iocb,
+	char		*buf,
+	size_t		len,
+	struct ep_data	*epdata,
+	const struct iovec *iv,
+	unsigned long	nr_segs
+)
+{
+	struct kiocb_priv	*priv;
+	struct usb_request	*req;
+	ssize_t			value;
+
+	priv = kzalloc(sizeof *priv, GFP_KERNEL);
+	if (!priv) {
+		value = -ENOMEM;
+fail:
+		kfree(buf);
+		return value;
+	}
+	iocb->private = priv;
+	if (iv) {
+		priv->iv = kmemdup(iv, nr_segs * sizeof(struct iovec),
+				   GFP_KERNEL);
+		if (!priv->iv) {
+			kfree(priv);
+			goto fail;
+		}
+	}
+	priv->nr_segs = nr_segs;
+
+	value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
+	if (unlikely(value < 0)) {
+		kfree(priv->iv);
+		kfree(priv);
+		goto fail;
+	}
+
+	iocb->ki_cancel = ep_aio_cancel;
+	get_ep(epdata);
+	priv->epdata = epdata;
+	priv->actual = 0;
+
+	/* each kiocb is coupled to one usb_request, but we can't
+	 * allocate or submit those if the host disconnected.
+	 */
+	spin_lock_irq(&epdata->dev->lock);
+	if (likely(epdata->ep)) {
+		req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
+		if (likely(req)) {
+			priv->req = req;
+			req->buf = buf;
+			req->length = len;
+			req->complete = ep_aio_complete;
+			req->context = iocb;
+			value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
+			if (unlikely(0 != value))
+				usb_ep_free_request(epdata->ep, req);
+		} else
+			value = -EAGAIN;
+	} else
+		value = -ENODEV;
+	spin_unlock_irq(&epdata->dev->lock);
+
+	mutex_unlock(&epdata->lock);
+
+	if (unlikely(value)) {
+		kfree(priv->iv);
+		kfree(priv);
+		put_ep(epdata);
+	} else
+		value = (iv ? -EIOCBRETRY : -EIOCBQUEUED);
+	return value;
+}
+
+static ssize_t
+ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
+		unsigned long nr_segs, loff_t o)
+{
+	struct ep_data		*epdata = iocb->ki_filp->private_data;
+	char			*buf;
+
+	if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
+		return -EINVAL;
+
+	buf = kmalloc(iocb->ki_left, GFP_KERNEL);
+	if (unlikely(!buf))
+		return -ENOMEM;
+
+	iocb->ki_retry = ep_aio_read_retry;
+	return ep_aio_rwtail(iocb, buf, iocb->ki_left, epdata, iov, nr_segs);
+}
+
+static ssize_t
+ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
+		unsigned long nr_segs, loff_t o)
+{
+	struct ep_data		*epdata = iocb->ki_filp->private_data;
+	char			*buf;
+	size_t			len = 0;
+	int			i = 0;
+
+	if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
+		return -EINVAL;
+
+	buf = kmalloc(iocb->ki_left, GFP_KERNEL);
+	if (unlikely(!buf))
+		return -ENOMEM;
+
+	for (i=0; i < nr_segs; i++) {
+		if (unlikely(copy_from_user(&buf[len], iov[i].iov_base,
+				iov[i].iov_len) != 0)) {
+			kfree(buf);
+			return -EFAULT;
+		}
+		len += iov[i].iov_len;
+	}
+	return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0);
+}
+
+/*----------------------------------------------------------------------*/
+
+/* used after endpoint configuration */
+static const struct file_operations ep_io_operations = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+
+	.read =		ep_read,
+	.write =	ep_write,
+	.unlocked_ioctl = ep_ioctl,
+	.release =	ep_release,
+
+	.aio_read =	ep_aio_read,
+	.aio_write =	ep_aio_write,
+};
+
+/* ENDPOINT INITIALIZATION
+ *
+ *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
+ *     status = write (fd, descriptors, sizeof descriptors)
+ *
+ * That write establishes the endpoint configuration, configuring
+ * the controller to process bulk, interrupt, or isochronous transfers
+ * at the right maxpacket size, and so on.
+ *
+ * The descriptors are message type 1, identified by a host order u32
+ * at the beginning of what's written.  Descriptor order is: full/low
+ * speed descriptor, then optional high speed descriptor.
+ */
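+/*
+ * A user-space sketch of that write (the descriptor values below are
+ * placeholders, not anything this driver requires):
+ *
+ *	struct usb_endpoint_descriptor fs = {
+ *		.bLength	  = USB_DT_ENDPOINT_SIZE,
+ *		.bDescriptorType  = USB_DT_ENDPOINT,
+ *		.bEndpointAddress = USB_DIR_IN | 1,
+ *		.bmAttributes	  = USB_ENDPOINT_XFER_BULK,
+ *		.wMaxPacketSize	  = __cpu_to_le16 (64),
+ *	};
+ *	__u32 tag = 1;
+ *	char buf [4 + USB_DT_ENDPOINT_SIZE];
+ *
+ *	memcpy (buf, &tag, 4);
+ *	memcpy (buf + 4, &fs, USB_DT_ENDPOINT_SIZE);
+ *	write (fd, buf, sizeof buf);	// full/low speed descriptor only
+ */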
+static ssize_t
+ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+{
+	struct ep_data		*data = fd->private_data;
+	struct usb_ep		*ep;
+	u32			tag;
+	int			value, length = len;
+
+	value = mutex_lock_interruptible(&data->lock);
+	if (value < 0)
+		return value;
+
+	if (data->state != STATE_EP_READY) {
+		value = -EL2HLT;
+		goto fail;
+	}
+
+	value = len;
+	if (len < USB_DT_ENDPOINT_SIZE + 4)
+		goto fail0;
+
+	/* we might need to change message format someday */
+	if (copy_from_user (&tag, buf, 4)) {
+		goto fail1;
+	}
+	if (tag != 1) {
+		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
+		goto fail0;
+	}
+	buf += 4;
+	len -= 4;
+
+	/* NOTE:  audio endpoint extensions not accepted here;
+	 * just don't include the extra bytes.
+	 */
+
+	/* full/low speed descriptor, then high speed */
+	if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
+		goto fail1;
+	}
+	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
+			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
+		goto fail0;
+	if (len != USB_DT_ENDPOINT_SIZE) {
+		if (len != 2 * USB_DT_ENDPOINT_SIZE)
+			goto fail0;
+		if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
+					USB_DT_ENDPOINT_SIZE)) {
+			goto fail1;
+		}
+		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
+				|| data->hs_desc.bDescriptorType
+					!= USB_DT_ENDPOINT) {
+			DBG(data->dev, "config %s, bad hs length or type\n",
+					data->name);
+			goto fail0;
+		}
+	}
+
+	spin_lock_irq (&data->dev->lock);
+	if (data->dev->state == STATE_DEV_UNBOUND) {
+		value = -ENOENT;
+		goto gone;
+	} else if ((ep = data->ep) == NULL) {
+		value = -ENODEV;
+		goto gone;
+	}
+	switch (data->dev->gadget->speed) {
+	case USB_SPEED_LOW:
+	case USB_SPEED_FULL:
+		ep->desc = &data->desc;
+		value = usb_ep_enable(ep);
+		if (value == 0)
+			data->state = STATE_EP_ENABLED;
+		break;
+#ifdef	CONFIG_USB_GADGET_DUALSPEED
+	case USB_SPEED_HIGH:
+		/* fails if caller didn't provide that descriptor... */
+		ep->desc = &data->hs_desc;
+		value = usb_ep_enable(ep);
+		if (value == 0)
+			data->state = STATE_EP_ENABLED;
+		break;
+#endif
+	default:
+		DBG(data->dev, "unconnected, %s init abandoned\n",
+				data->name);
+		value = -EINVAL;
+	}
+	if (value == 0) {
+		fd->f_op = &ep_io_operations;
+		value = length;
+	}
+gone:
+	spin_unlock_irq (&data->dev->lock);
+	if (value < 0) {
+fail:
+		data->desc.bDescriptorType = 0;
+		data->hs_desc.bDescriptorType = 0;
+	}
+	mutex_unlock(&data->lock);
+	return value;
+fail0:
+	value = -EINVAL;
+	goto fail;
+fail1:
+	value = -EFAULT;
+	goto fail;
+}
+
+static int
+ep_open (struct inode *inode, struct file *fd)
+{
+	struct ep_data		*data = inode->i_private;
+	int			value = -EBUSY;
+
+	if (mutex_lock_interruptible(&data->lock) != 0)
+		return -EINTR;
+	spin_lock_irq (&data->dev->lock);
+	if (data->dev->state == STATE_DEV_UNBOUND)
+		value = -ENOENT;
+	else if (data->state == STATE_EP_DISABLED) {
+		value = 0;
+		data->state = STATE_EP_READY;
+		get_ep (data);
+		fd->private_data = data;
+		VDEBUG (data->dev, "%s ready\n", data->name);
+	} else
+		DBG (data->dev, "%s state %d\n",
+			data->name, data->state);
+	spin_unlock_irq (&data->dev->lock);
+	mutex_unlock(&data->lock);
+	return value;
+}
+
+/* used before endpoint configuration */
+static const struct file_operations ep_config_operations = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+
+	.open =		ep_open,
+	.write =	ep_config,
+	.release =	ep_release,
+};
+
+/*----------------------------------------------------------------------*/
+
+/* EP0 IMPLEMENTATION can be partly in userspace.
+ *
+ * Drivers that use this facility receive various events, including
+ * control requests the kernel doesn't handle.  Drivers that don't
+ * use this facility may be too simple-minded for real applications.
+ */
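+/*
+ * A user-space event loop on the ep0 fd might look like this (sketch;
+ * error handling omitted):
+ *
+ *	struct usb_gadgetfs_event ev [5];
+ *	int i, n = read (ep0, ev, sizeof ev) / sizeof ev [0];
+ *
+ *	for (i = 0; i < n; i++) {
+ *		switch (ev [i].type) {
+ *		case GADGETFS_CONNECT:		// ev [i].u.speed
+ *		case GADGETFS_DISCONNECT:
+ *		case GADGETFS_SUSPEND:
+ *			break;
+ *		case GADGETFS_SETUP:		// ev [i].u.setup; answer on ep0
+ *			break;
+ *		}
+ *	}
+ */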
+
+static inline void ep0_readable (struct dev_data *dev)
+{
+	wake_up (&dev->wait);
+	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
+}
+
+static void clean_req (struct usb_ep *ep, struct usb_request *req)
+{
+	struct dev_data		*dev = ep->driver_data;
+
+	if (req->buf != dev->rbuf) {
+		kfree(req->buf);
+		req->buf = dev->rbuf;
+		req->dma = DMA_ADDR_INVALID;
+	}
+	req->complete = epio_complete;
+	dev->setup_out_ready = 0;
+}
+
+static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
+{
+	struct dev_data		*dev = ep->driver_data;
+	unsigned long		flags;
+	int			free = 1;
+
+	/* for control OUT, data must still get to userspace */
+	spin_lock_irqsave(&dev->lock, flags);
+	if (!dev->setup_in) {
+		dev->setup_out_error = (req->status != 0);
+		if (!dev->setup_out_error)
+			free = 0;
+		dev->setup_out_ready = 1;
+		ep0_readable (dev);
+	}
+
+	/* clean up as appropriate */
+	if (free && req->buf != &dev->rbuf)
+		clean_req (ep, req);
+	req->complete = epio_complete;
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
+{
+	struct dev_data	*dev = ep->driver_data;
+
+	if (dev->setup_out_ready) {
+		DBG (dev, "ep0 request busy!\n");
+		return -EBUSY;
+	}
+	if (len > sizeof (dev->rbuf))
+		req->buf = kmalloc(len, GFP_ATOMIC);
+	if (req->buf == NULL) {
+		req->buf = dev->rbuf;
+		return -ENOMEM;
+	}
+	req->complete = ep0_complete;
+	req->length = len;
+	req->zero = 0;
+	return 0;
+}
+
+static ssize_t
+ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+{
+	struct dev_data			*dev = fd->private_data;
+	ssize_t				retval;
+	enum ep0_state			state;
+
+	spin_lock_irq (&dev->lock);
+
+	/* report fd mode change before acting on it */
+	if (dev->setup_abort) {
+		dev->setup_abort = 0;
+		retval = -EIDRM;
+		goto done;
+	}
+
+	/* control DATA stage */
+	if ((state = dev->state) == STATE_DEV_SETUP) {
+
+		if (dev->setup_in) {		/* stall IN */
+			VDEBUG(dev, "ep0in stall\n");
+			(void) usb_ep_set_halt (dev->gadget->ep0);
+			retval = -EL2HLT;
+			dev->state = STATE_DEV_CONNECTED;
+
+		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
+			struct usb_ep		*ep = dev->gadget->ep0;
+			struct usb_request	*req = dev->req;
+
+			if ((retval = setup_req (ep, req, 0)) == 0)
+				retval = usb_ep_queue (ep, req, GFP_ATOMIC);
+			dev->state = STATE_DEV_CONNECTED;
+
+			/* assume that was SET_CONFIGURATION */
+			if (dev->current_config) {
+				unsigned power;
+
+				if (gadget_is_dualspeed(dev->gadget)
+						&& (dev->gadget->speed
+							== USB_SPEED_HIGH))
+					power = dev->hs_config->bMaxPower;
+				else
+					power = dev->config->bMaxPower;
+				usb_gadget_vbus_draw(dev->gadget, 2 * power);
+			}
+
+		} else {			/* collect OUT data */
+			if ((fd->f_flags & O_NONBLOCK) != 0
+					&& !dev->setup_out_ready) {
+				retval = -EAGAIN;
+				goto done;
+			}
+			spin_unlock_irq (&dev->lock);
+			retval = wait_event_interruptible (dev->wait,
+					dev->setup_out_ready != 0);
+
+			/* FIXME state could change from under us */
+			spin_lock_irq (&dev->lock);
+			if (retval)
+				goto done;
+
+			if (dev->state != STATE_DEV_SETUP) {
+				retval = -ECANCELED;
+				goto done;
+			}
+			dev->state = STATE_DEV_CONNECTED;
+
+			if (dev->setup_out_error)
+				retval = -EIO;
+			else {
+				len = min (len, (size_t)dev->req->actual);
+// FIXME don't call this with the spinlock held ...
+				if (copy_to_user (buf, dev->req->buf, len))
+					retval = -EFAULT;
+				else
+					retval = len;
+				clean_req (dev->gadget->ep0, dev->req);
+				/* NOTE userspace can't yet choose to stall */
+			}
+		}
+		goto done;
+	}
+
+	/* else normal: return event data */
+	if (len < sizeof dev->event [0]) {
+		retval = -EINVAL;
+		goto done;
+	}
+	len -= len % sizeof (struct usb_gadgetfs_event);
+	dev->usermode_setup = 1;
+
+scan:
+	/* return queued events right away */
+	if (dev->ev_next != 0) {
+		unsigned		i, n;
+
+		n = len / sizeof (struct usb_gadgetfs_event);
+		if (dev->ev_next < n)
+			n = dev->ev_next;
+
+		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
+		for (i = 0; i < n; i++) {
+			if (dev->event [i].type == GADGETFS_SETUP) {
+				dev->state = STATE_DEV_SETUP;
+				n = i + 1;
+				break;
+			}
+		}
+		spin_unlock_irq (&dev->lock);
+		len = n * sizeof (struct usb_gadgetfs_event);
+		if (copy_to_user (buf, &dev->event, len))
+			retval = -EFAULT;
+		else
+			retval = len;
+		if (len > 0) {
+			/* NOTE this doesn't guard against broken drivers;
+			 * concurrent ep0 readers may lose events.
+			 */
+			spin_lock_irq (&dev->lock);
+			if (dev->ev_next > n) {
+				memmove(&dev->event[0], &dev->event[n],
+					sizeof (struct usb_gadgetfs_event)
+						* (dev->ev_next - n));
+			}
+			dev->ev_next -= n;
+			spin_unlock_irq (&dev->lock);
+		}
+		return retval;
+	}
+	if (fd->f_flags & O_NONBLOCK) {
+		retval = -EAGAIN;
+		goto done;
+	}
+
+	switch (state) {
+	default:
+		DBG (dev, "fail %s, state %d\n", __func__, state);
+		retval = -ESRCH;
+		break;
+	case STATE_DEV_UNCONNECTED:
+	case STATE_DEV_CONNECTED:
+		spin_unlock_irq (&dev->lock);
+		DBG (dev, "%s wait\n", __func__);
+
+		/* wait for events */
+		retval = wait_event_interruptible (dev->wait,
+				dev->ev_next != 0);
+		if (retval < 0)
+			return retval;
+		spin_lock_irq (&dev->lock);
+		goto scan;
+	}
+
+done:
+	spin_unlock_irq (&dev->lock);
+	return retval;
+}
+
+static struct usb_gadgetfs_event *
+next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
+{
+	struct usb_gadgetfs_event	*event;
+	unsigned			i;
+
+	switch (type) {
+	/* these events purge the queue */
+	case GADGETFS_DISCONNECT:
+		if (dev->state == STATE_DEV_SETUP)
+			dev->setup_abort = 1;
+		// FALL THROUGH
+	case GADGETFS_CONNECT:
+		dev->ev_next = 0;
+		break;
+	case GADGETFS_SETUP:		/* previous request timed out */
+	case GADGETFS_SUSPEND:		/* same effect */
+		/* these events can't be repeated */
+		for (i = 0; i != dev->ev_next; i++) {
+			if (dev->event [i].type != type)
+				continue;
+			DBG(dev, "discard old event[%d] %d\n", i, type);
+			dev->ev_next--;
+			if (i == dev->ev_next)
+				break;
+			/* indices start at zero, for simplicity */
+			memmove (&dev->event [i], &dev->event [i + 1],
+				sizeof (struct usb_gadgetfs_event)
+					* (dev->ev_next - i));
+		}
+		break;
+	default:
+		BUG ();
+	}
+	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
+	event = &dev->event [dev->ev_next++];
+	BUG_ON (dev->ev_next > N_EVENT);
+	memset (event, 0, sizeof *event);
+	event->type = type;
+	return event;
+}
+
+static ssize_t
+ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+{
+	struct dev_data		*dev = fd->private_data;
+	ssize_t			retval = -ESRCH;
+
+	spin_lock_irq (&dev->lock);
+
+	/* report fd mode change before acting on it */
+	if (dev->setup_abort) {
+		dev->setup_abort = 0;
+		retval = -EIDRM;
+
+	/* data and/or status stage for control request */
+	} else if (dev->state == STATE_DEV_SETUP) {
+
+		/* IN DATA+STATUS caller makes len <= wLength */
+		if (dev->setup_in) {
+			retval = setup_req (dev->gadget->ep0, dev->req, len);
+			if (retval == 0) {
+				dev->state = STATE_DEV_CONNECTED;
+				spin_unlock_irq (&dev->lock);
+				if (copy_from_user (dev->req->buf, buf, len))
+					retval = -EFAULT;
+				else {
+					if (len < dev->setup_wLength)
+						dev->req->zero = 1;
+					retval = usb_ep_queue (
+						dev->gadget->ep0, dev->req,
+						GFP_KERNEL);
+				}
+				if (retval < 0) {
+					spin_lock_irq (&dev->lock);
+					clean_req (dev->gadget->ep0, dev->req);
+					spin_unlock_irq (&dev->lock);
+				} else
+					retval = len;
+
+				return retval;
+			}
+
+		/* can stall some OUT transfers */
+		} else if (dev->setup_can_stall) {
+			VDEBUG(dev, "ep0out stall\n");
+			(void) usb_ep_set_halt (dev->gadget->ep0);
+			retval = -EL2HLT;
+			dev->state = STATE_DEV_CONNECTED;
+		} else {
+			DBG(dev, "bogus ep0out stall!\n");
+		}
+	} else
+		DBG (dev, "fail %s, state %d\n", __func__, dev->state);
+
+	spin_unlock_irq (&dev->lock);
+	return retval;
+}
+
+static int
+ep0_fasync (int f, struct file *fd, int on)
+{
+	struct dev_data		*dev = fd->private_data;
+	// caller must F_SETOWN before signal delivery happens
+	VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
+	return fasync_helper (f, fd, on, &dev->fasync);
+}
+
+static struct usb_gadget_driver gadgetfs_driver;
+
+static int
+dev_release (struct inode *inode, struct file *fd)
+{
+	struct dev_data		*dev = fd->private_data;
+
+	/* closing ep0 === shutdown all */
+
+	usb_gadget_unregister_driver (&gadgetfs_driver);
+
+	/* at this point "good" hardware has disconnected the
+	 * device from USB; the host won't see it any more.
+	 * alternatively, all host requests will time out.
+	 */
+
+	kfree (dev->buf);
+	dev->buf = NULL;
+	put_dev (dev);
+
+	/* other endpoints were all decoupled from this device */
+	spin_lock_irq(&dev->lock);
+	dev->state = STATE_DEV_DISABLED;
+	spin_unlock_irq(&dev->lock);
+	return 0;
+}
+
+static unsigned int
+ep0_poll (struct file *fd, poll_table *wait)
+{
+	struct dev_data		*dev = fd->private_data;
+	int			mask = 0;
+
+	poll_wait(fd, &dev->wait, wait);
+
+	spin_lock_irq (&dev->lock);
+
+	/* report fd mode change before acting on it */
+	if (dev->setup_abort) {
+		dev->setup_abort = 0;
+		mask = POLLHUP;
+		goto out;
+	}
+
+	if (dev->state == STATE_DEV_SETUP) {
+		if (dev->setup_in || dev->setup_can_stall)
+			mask = POLLOUT;
+	} else {
+		if (dev->ev_next != 0)
+			mask = POLLIN;
+	}
+out:
+	spin_unlock_irq(&dev->lock);
+	return mask;
+}
+
+static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
+{
+	struct dev_data		*dev = fd->private_data;
+	struct usb_gadget	*gadget = dev->gadget;
+	long ret = -ENOTTY;
+
+	if (gadget->ops->ioctl)
+		ret = gadget->ops->ioctl (gadget, code, value);
+
+	return ret;
+}
+
+/* used after device configuration */
+static const struct file_operations ep0_io_operations = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+
+	.read =		ep0_read,
+	.write =	ep0_write,
+	.fasync =	ep0_fasync,
+	.poll =		ep0_poll,
+	.unlocked_ioctl =	dev_ioctl,
+	.release =	dev_release,
+};
+
+/*----------------------------------------------------------------------*/
+
+/* The in-kernel gadget driver handles most ep0 issues, in particular
+ * enumerating the single configuration (as provided from user space).
+ *
+ * Unrecognized ep0 requests may be handled in user space.
+ */
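+/*
+ * When a SETUP is delegated, the user-mode driver finishes the control
+ * transfer on the ep0 fd itself (sketch; "setup" is the usb_ctrlrequest
+ * taken from a GADGETFS_SETUP event):
+ *
+ *	if (setup.bRequestType & USB_DIR_IN)
+ *		write (ep0, reply, setup.wLength);	// send IN data stage
+ *	else if (setup.wLength)
+ *		read (ep0, buf, setup.wLength);		// collect OUT data stage
+ *	else
+ *		read (ep0, buf, 0);			// zero-length read acks it
+ */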
+
+#ifdef	CONFIG_USB_GADGET_DUALSPEED
+static void make_qualifier (struct dev_data *dev)
+{
+	struct usb_qualifier_descriptor		qual;
+	struct usb_device_descriptor		*desc;
+
+	qual.bLength = sizeof qual;
+	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
+	qual.bcdUSB = cpu_to_le16 (0x0200);
+
+	desc = dev->dev;
+	qual.bDeviceClass = desc->bDeviceClass;
+	qual.bDeviceSubClass = desc->bDeviceSubClass;
+	qual.bDeviceProtocol = desc->bDeviceProtocol;
+
+	/* assumes ep0 uses the same value for both speeds ... */
+	qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
+
+	qual.bNumConfigurations = 1;
+	qual.bRESERVED = 0;
+
+	memcpy (dev->rbuf, &qual, sizeof qual);
+}
+#endif
+
+static int
+config_buf (struct dev_data *dev, u8 type, unsigned index)
+{
+	int		len;
+	int		hs = 0;
+
+	/* only one configuration */
+	if (index > 0)
+		return -EINVAL;
+
+	if (gadget_is_dualspeed(dev->gadget)) {
+		hs = (dev->gadget->speed == USB_SPEED_HIGH);
+		if (type == USB_DT_OTHER_SPEED_CONFIG)
+			hs = !hs;
+	}
+	if (hs) {
+		dev->req->buf = dev->hs_config;
+		len = le16_to_cpu(dev->hs_config->wTotalLength);
+	} else {
+		dev->req->buf = dev->config;
+		len = le16_to_cpu(dev->config->wTotalLength);
+	}
+	((u8 *)dev->req->buf) [1] = type;
+	return len;
+}
+
+static int
+gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+{
+	struct dev_data			*dev = get_gadget_data (gadget);
+	struct usb_request		*req = dev->req;
+	int				value = -EOPNOTSUPP;
+	struct usb_gadgetfs_event	*event;
+	u16				w_value = le16_to_cpu(ctrl->wValue);
+	u16				w_length = le16_to_cpu(ctrl->wLength);
+
+	spin_lock (&dev->lock);
+	dev->setup_abort = 0;
+	if (dev->state == STATE_DEV_UNCONNECTED) {
+		if (gadget_is_dualspeed(gadget)
+				&& gadget->speed == USB_SPEED_HIGH
+				&& dev->hs_config == NULL) {
+			spin_unlock(&dev->lock);
+			ERROR (dev, "no high speed config??\n");
+			return -EINVAL;
+		}
+
+		dev->state = STATE_DEV_CONNECTED;
+
+		INFO (dev, "connected\n");
+		event = next_event (dev, GADGETFS_CONNECT);
+		event->u.speed = gadget->speed;
+		ep0_readable (dev);
+
+	/* host may have given up waiting for response.  we can miss control
+	 * requests handled lower down (device/endpoint status and features);
+	 * then ep0_{read,write} will report the wrong status. controller
+	 * driver will have aborted pending i/o.
+	 */
+	} else if (dev->state == STATE_DEV_SETUP)
+		dev->setup_abort = 1;
+
+	req->buf = dev->rbuf;
+	req->dma = DMA_ADDR_INVALID;
+	req->context = NULL;
+	value = -EOPNOTSUPP;
+	switch (ctrl->bRequest) {
+
+	case USB_REQ_GET_DESCRIPTOR:
+		if (ctrl->bRequestType != USB_DIR_IN)
+			goto unrecognized;
+		switch (w_value >> 8) {
+
+		case USB_DT_DEVICE:
+			value = min (w_length, (u16) sizeof *dev->dev);
+			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
+			req->buf = dev->dev;
+			break;
+#ifdef	CONFIG_USB_GADGET_DUALSPEED
+		case USB_DT_DEVICE_QUALIFIER:
+			if (!dev->hs_config)
+				break;
+			value = min (w_length, (u16)
+				sizeof (struct usb_qualifier_descriptor));
+			make_qualifier (dev);
+			break;
+		case USB_DT_OTHER_SPEED_CONFIG:
+			// FALLTHROUGH
+#endif
+		case USB_DT_CONFIG:
+			value = config_buf (dev,
+					w_value >> 8,
+					w_value & 0xff);
+			if (value >= 0)
+				value = min (w_length, (u16) value);
+			break;
+		case USB_DT_STRING:
+			goto unrecognized;
+
+		default:		// all others are errors
+			break;
+		}
+		break;
+
+	/* currently one config, two speeds */
+	case USB_REQ_SET_CONFIGURATION:
+		if (ctrl->bRequestType != 0)
+			goto unrecognized;
+		if (0 == (u8) w_value) {
+			value = 0;
+			dev->current_config = 0;
+			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
+			// user mode expected to disable endpoints
+		} else {
+			u8	config, power;
+
+			if (gadget_is_dualspeed(gadget)
+					&& gadget->speed == USB_SPEED_HIGH) {
+				config = dev->hs_config->bConfigurationValue;
+				power = dev->hs_config->bMaxPower;
+			} else {
+				config = dev->config->bConfigurationValue;
+				power = dev->config->bMaxPower;
+			}
+
+			if (config == (u8) w_value) {
+				value = 0;
+				dev->current_config = config;
+				usb_gadget_vbus_draw(gadget, 2 * power);
+			}
+		}
+
+		/* report SET_CONFIGURATION like any other control request,
+		 * except that usermode may not stall this.  the next
+		 * request mustn't be allowed to start until this finishes:
+		 * endpoints and threads set up, etc.
+		 *
+		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
+		 * has bad/racy automagic that prevents synchronizing here.
+		 * even kernel mode drivers often miss them.
+		 */
+		if (value == 0) {
+			INFO (dev, "configuration #%d\n", dev->current_config);
+			if (dev->usermode_setup) {
+				dev->setup_can_stall = 0;
+				goto delegate;
+			}
+		}
+		break;
+
+#ifndef	CONFIG_USB_PXA25X
+	/* PXA automagically handles this request too */
+	case USB_REQ_GET_CONFIGURATION:
+		if (ctrl->bRequestType != 0x80)
+			goto unrecognized;
+		*(u8 *)req->buf = dev->current_config;
+		value = min (w_length, (u16) 1);
+		break;
+#endif
+
+	default:
+unrecognized:
+		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
+			dev->usermode_setup ? "delegate" : "fail",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, le16_to_cpu(ctrl->wIndex), w_length);
+
+		/* if there's an ep0 reader, don't stall */
+		if (dev->usermode_setup) {
+			dev->setup_can_stall = 1;
+delegate:
+			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
+						? 1 : 0;
+			dev->setup_wLength = w_length;
+			dev->setup_out_ready = 0;
+			dev->setup_out_error = 0;
+			value = 0;
+
+			/* read DATA stage for OUT right away */
+			if (unlikely (!dev->setup_in && w_length)) {
+				value = setup_req (gadget->ep0, dev->req,
+							w_length);
+				if (value < 0)
+					break;
+				value = usb_ep_queue (gadget->ep0, dev->req,
+							GFP_ATOMIC);
+				if (value < 0) {
+					clean_req (gadget->ep0, dev->req);
+					break;
+				}
+
+				/* we can't currently stall these */
+				dev->setup_can_stall = 0;
+			}
+
+			/* state changes when reader collects event */
+			event = next_event (dev, GADGETFS_SETUP);
+			event->u.setup = *ctrl;
+			ep0_readable (dev);
+			spin_unlock (&dev->lock);
+			return 0;
+		}
+	}
+
+	/* proceed with data transfer and status phases? */
+	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
+		req->length = value;
+		req->zero = value < w_length;
+		value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0) {
+			DBG (dev, "ep_queue --> %d\n", value);
+			req->status = 0;
+		}
+	}
+
+	/* device stalls when value < 0 */
+	spin_unlock (&dev->lock);
+	return value;
+}
+
+static void destroy_ep_files (struct dev_data *dev)
+{
+	DBG (dev, "%s %d\n", __func__, dev->state);
+
+	/* dev->state must prevent interference */
+	spin_lock_irq (&dev->lock);
+	while (!list_empty(&dev->epfiles)) {
+		struct ep_data	*ep;
+		struct inode	*parent;
+		struct dentry	*dentry;
+
+		/* break link to FS */
+		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
+		list_del_init (&ep->epfiles);
+		dentry = ep->dentry;
+		ep->dentry = NULL;
+		parent = dentry->d_parent->d_inode;
+
+		/* break link to controller */
+		if (ep->state == STATE_EP_ENABLED)
+			(void) usb_ep_disable (ep->ep);
+		ep->state = STATE_EP_UNBOUND;
+		usb_ep_free_request (ep->ep, ep->req);
+		ep->ep = NULL;
+		wake_up (&ep->wait);
+		put_ep (ep);
+
+		spin_unlock_irq (&dev->lock);
+
+		/* break link to dcache */
+		mutex_lock (&parent->i_mutex);
+		d_delete (dentry);
+		dput (dentry);
+		mutex_unlock (&parent->i_mutex);
+
+		spin_lock_irq (&dev->lock);
+	}
+	spin_unlock_irq (&dev->lock);
+}
+
+
+static struct inode *
+gadgetfs_create_file (struct super_block *sb, char const *name,
+		void *data, const struct file_operations *fops,
+		struct dentry **dentry_p);
+
+static int activate_ep_files (struct dev_data *dev)
+{
+	struct usb_ep	*ep;
+	struct ep_data	*data;
+
+	gadget_for_each_ep (ep, dev->gadget) {
+
+		data = kzalloc(sizeof(*data), GFP_KERNEL);
+		if (!data)
+			goto enomem0;
+		data->state = STATE_EP_DISABLED;
+		mutex_init(&data->lock);
+		init_waitqueue_head (&data->wait);
+
+		strncpy (data->name, ep->name, sizeof (data->name) - 1);
+		atomic_set (&data->count, 1);
+		data->dev = dev;
+		get_dev (dev);
+
+		data->ep = ep;
+		ep->driver_data = data;
+
+		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
+		if (!data->req)
+			goto enomem1;
+
+		data->inode = gadgetfs_create_file (dev->sb, data->name,
+				data, &ep_config_operations,
+				&data->dentry);
+		if (!data->inode)
+			goto enomem2;
+		list_add_tail (&data->epfiles, &dev->epfiles);
+	}
+	return 0;
+
+enomem2:
+	usb_ep_free_request (ep, data->req);
+enomem1:
+	put_dev (dev);
+	kfree (data);
+enomem0:
+	DBG (dev, "%s enomem\n", __func__);
+	destroy_ep_files (dev);
+	return -ENOMEM;
+}
+
+static void
+gadgetfs_unbind (struct usb_gadget *gadget)
+{
+	struct dev_data		*dev = get_gadget_data (gadget);
+
+	DBG (dev, "%s\n", __func__);
+
+	spin_lock_irq (&dev->lock);
+	dev->state = STATE_DEV_UNBOUND;
+	spin_unlock_irq (&dev->lock);
+
+	destroy_ep_files (dev);
+	gadget->ep0->driver_data = NULL;
+	set_gadget_data (gadget, NULL);
+
+	/* we've already been disconnected ... no i/o is active */
+	if (dev->req)
+		usb_ep_free_request (gadget->ep0, dev->req);
+	DBG (dev, "%s done\n", __func__);
+	put_dev (dev);
+}
+
+static struct dev_data		*the_device;
+
+static int
+gadgetfs_bind (struct usb_gadget *gadget)
+{
+	struct dev_data		*dev = the_device;
+
+	if (!dev)
+		return -ESRCH;
+	if (0 != strcmp (CHIP, gadget->name)) {
+		pr_err("%s expected %s controller not %s\n",
+			shortname, CHIP, gadget->name);
+		return -ENODEV;
+	}
+
+	set_gadget_data (gadget, dev);
+	dev->gadget = gadget;
+	gadget->ep0->driver_data = dev;
+
+	/* preallocate control response and buffer */
+	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
+	if (!dev->req)
+		goto enomem;
+	dev->req->context = NULL;
+	dev->req->complete = epio_complete;
+
+	if (activate_ep_files (dev) < 0)
+		goto enomem;
+
+	INFO (dev, "bound to %s driver\n", gadget->name);
+	spin_lock_irq(&dev->lock);
+	dev->state = STATE_DEV_UNCONNECTED;
+	spin_unlock_irq(&dev->lock);
+	get_dev (dev);
+	return 0;
+
+enomem:
+	gadgetfs_unbind (gadget);
+	return -ENOMEM;
+}
+
+static void
+gadgetfs_disconnect (struct usb_gadget *gadget)
+{
+	struct dev_data		*dev = get_gadget_data (gadget);
+	unsigned long		flags;
+
+	spin_lock_irqsave (&dev->lock, flags);
+	if (dev->state == STATE_DEV_UNCONNECTED)
+		goto exit;
+	dev->state = STATE_DEV_UNCONNECTED;
+
+	INFO (dev, "disconnected\n");
+	next_event (dev, GADGETFS_DISCONNECT);
+	ep0_readable (dev);
+exit:
+	spin_unlock_irqrestore (&dev->lock, flags);
+}
+
+static void
+gadgetfs_suspend (struct usb_gadget *gadget)
+{
+	struct dev_data		*dev = get_gadget_data (gadget);
+
+	INFO (dev, "suspended from state %d\n", dev->state);
+	spin_lock (&dev->lock);
+	switch (dev->state) {
+	case STATE_DEV_SETUP:		// VERY odd... host died??
+	case STATE_DEV_CONNECTED:
+	case STATE_DEV_UNCONNECTED:
+		next_event (dev, GADGETFS_SUSPEND);
+		ep0_readable (dev);
+		/* FALLTHROUGH */
+	default:
+		break;
+	}
+	spin_unlock (&dev->lock);
+}
+
+static struct usb_gadget_driver gadgetfs_driver = {
+#ifdef	CONFIG_USB_GADGET_DUALSPEED
+	.max_speed	= USB_SPEED_HIGH,
+#else
+	.max_speed	= USB_SPEED_FULL,
+#endif
+	.function	= (char *) driver_desc,
+	.unbind		= gadgetfs_unbind,
+	.setup		= gadgetfs_setup,
+	.disconnect	= gadgetfs_disconnect,
+	.suspend	= gadgetfs_suspend,
+
+	.driver	= {
+		.name		= (char *) shortname,
+	},
+};
+
+/*----------------------------------------------------------------------*/
+
+static void gadgetfs_nop(struct usb_gadget *arg) { }
+
+static int gadgetfs_probe (struct usb_gadget *gadget)
+{
+	CHIP = gadget->name;
+	return -EISNAM;
+}
+
+static struct usb_gadget_driver probe_driver = {
+	.max_speed	= USB_SPEED_HIGH,
+	.unbind		= gadgetfs_nop,
+	.setup		= (void *)gadgetfs_nop,
+	.disconnect	= gadgetfs_nop,
+	.driver	= {
+		.name		= "nop",
+	},
+};
+
+
+/* DEVICE INITIALIZATION
+ *
+ *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
+ *     status = write (fd, descriptors, sizeof descriptors)
+ *
+ * That write establishes the device configuration, so the kernel can
+ * bind to the controller ... guaranteeing it can handle enumeration
+ * at all necessary speeds.  Descriptor order is:
+ *
+ * . message tag (u32, host order) ... for now, must be zero; it
+ *	would change to support features like multi-config devices
+ * . full/low speed config ... all wTotalLength bytes (with interface,
+ *	class, altsetting, endpoint, and other descriptors)
+ * . high speed config ... all descriptors, for high speed operation;
+ *	this one's optional except for high-speed hardware
+ * . device descriptor
+ *
+ * Endpoints are not yet enabled. Drivers must wait until device
+ * configuration and interface altsetting changes create
+ * the need to configure (or unconfigure) them.
+ *
+ * After initialization, the device stays active for as long as that
+ * $CHIP file is open.  Events must then be read from that descriptor,
+ * such as configuration notifications.
+ */
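+
+/* A minimal user-space sketch of that initialization write.  Illustrative
+ * only: "fs_config", "hs_config" and "device_desc" are hypothetical buffers
+ * already laid out in the order described above, with lengths to match.
+ *
+ *	__u32 tag = 0;
+ *	char buf[4096], *cp = buf;
+ *	int fd, status;
+ *
+ *	memcpy(cp, &tag, 4); cp += 4;
+ *	memcpy(cp, fs_config, fs_config_len); cp += fs_config_len;
+ *	memcpy(cp, hs_config, hs_config_len); cp += hs_config_len;
+ *	memcpy(cp, &device_desc, sizeof device_desc);
+ *	cp += sizeof device_desc;
+ *
+ *	fd = open("/dev/gadget/$CHIP", O_RDWR);
+ *	status = write(fd, buf, cp - buf);
+ *	if (status != cp - buf)
+ *		perror("gadgetfs config");
+ *
+ * On success the kernel binds to the controller and the same fd is then
+ * used to read events such as GADGETFS_CONNECT and GADGETFS_SETUP.
+ */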
+
+static int is_valid_config (struct usb_config_descriptor *config)
+{
+	return config->bDescriptorType == USB_DT_CONFIG
+		&& config->bLength == USB_DT_CONFIG_SIZE
+		&& config->bConfigurationValue != 0
+		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
+		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
+	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
+	/* FIXME check lengths: walk to end */
+}
+
+static ssize_t
+dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+{
+	struct dev_data		*dev = fd->private_data;
+	ssize_t			value = len, length = len;
+	unsigned		total;
+	u32			tag;
+	char			*kbuf;
+
+	if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
+		return -EINVAL;
+
+	/* we might need to change message format someday */
+	if (copy_from_user (&tag, buf, 4))
+		return -EFAULT;
+	if (tag != 0)
+		return -EINVAL;
+	buf += 4;
+	length -= 4;
+
+	kbuf = memdup_user(buf, length);
+	if (IS_ERR(kbuf))
+		return PTR_ERR(kbuf);
+
+	spin_lock_irq (&dev->lock);
+	value = -EINVAL;
+	if (dev->buf)
+		goto fail;
+	dev->buf = kbuf;
+
+	/* full or low speed config */
+	dev->config = (void *) kbuf;
+	total = le16_to_cpu(dev->config->wTotalLength);
+	if (!is_valid_config (dev->config) || total >= length)
+		goto fail;
+	kbuf += total;
+	length -= total;
+
+	/* optional high speed config */
+	if (kbuf [1] == USB_DT_CONFIG) {
+		dev->hs_config = (void *) kbuf;
+		total = le16_to_cpu(dev->hs_config->wTotalLength);
+		if (!is_valid_config (dev->hs_config) || total >= length)
+			goto fail;
+		kbuf += total;
+		length -= total;
+	}
+
+	/* could support multiple configs, using another encoding! */
+
+	/* device descriptor (tweaked for paranoia) */
+	if (length != USB_DT_DEVICE_SIZE)
+		goto fail;
+	dev->dev = (void *)kbuf;
+	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
+			|| dev->dev->bDescriptorType != USB_DT_DEVICE
+			|| dev->dev->bNumConfigurations != 1)
+		goto fail;
+	dev->dev->bNumConfigurations = 1;
+	dev->dev->bcdUSB = cpu_to_le16 (0x0200);
+
+	/* triggers gadgetfs_bind(); then we can enumerate. */
+	spin_unlock_irq (&dev->lock);
+	value = usb_gadget_probe_driver(&gadgetfs_driver, gadgetfs_bind);
+	if (value != 0) {
+		kfree (dev->buf);
+		dev->buf = NULL;
+	} else {
+		/* at this point "good" hardware has for the first time
+		 * let the USB host see us.  alternatively, if users
+		 * unplug/replug that will clear all the error state.
+		 *
+		 * note:  everything running before here was guaranteed
+		 * to choke driver model style diagnostics.  from here
+		 * on, they can work ... except in cleanup paths that
+		 * kick in after the ep0 descriptor is closed.
+		 */
+		fd->f_op = &ep0_io_operations;
+		value = len;
+	}
+	return value;
+
+fail:
+	spin_unlock_irq (&dev->lock);
+	pr_debug ("%s: %s fail %Zd, %p\n", shortname, __func__, value, dev);
+	kfree (dev->buf);
+	dev->buf = NULL;
+	return value;
+}
+
+static int
+dev_open (struct inode *inode, struct file *fd)
+{
+	struct dev_data		*dev = inode->i_private;
+	int			value = -EBUSY;
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_DEV_DISABLED) {
+		dev->ev_next = 0;
+		dev->state = STATE_DEV_OPENED;
+		fd->private_data = dev;
+		get_dev (dev);
+		value = 0;
+	}
+	spin_unlock_irq(&dev->lock);
+	return value;
+}
+
+static const struct file_operations dev_init_operations = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+
+	.open =		dev_open,
+	.write =	dev_config,
+	.fasync =	ep0_fasync,
+	.unlocked_ioctl = dev_ioctl,
+	.release =	dev_release,
+};
+
+/*----------------------------------------------------------------------*/
+
+/* FILESYSTEM AND SUPERBLOCK OPERATIONS
+ *
+ * Mounting the filesystem creates a controller file, used first for
+ * device configuration then later for event monitoring.
+ */
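+
+/* A minimal sketch of that mount step from user space (illustrative only;
+ * "/dev/gadget" is just the conventional mount point):
+ *
+ *	#include <sys/mount.h>
+ *
+ *	if (mount("gadgetfs", "/dev/gadget", "gadgetfs", 0, NULL) < 0)
+ *		perror("mount gadgetfs");
+ *
+ * The controller file then shows up as /dev/gadget/$CHIP, ready for the
+ * configuration write described under DEVICE INITIALIZATION above.
+ */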
+
+
+/* FIXME PAM etc could set this security policy without mount options
+ * if epfiles inherited ownership and permissions from ep0 ...
+ */
+
+static unsigned default_uid;
+static unsigned default_gid;
+static unsigned default_perm = S_IRUSR | S_IWUSR;
+
+module_param (default_uid, uint, 0644);
+module_param (default_gid, uint, 0644);
+module_param (default_perm, uint, 0644);
+
+
+static struct inode *
+gadgetfs_make_inode (struct super_block *sb,
+		void *data, const struct file_operations *fops,
+		int mode)
+{
+	struct inode *inode = new_inode (sb);
+
+	if (inode) {
+		inode->i_ino = get_next_ino();
+		inode->i_mode = mode;
+		inode->i_uid = default_uid;
+		inode->i_gid = default_gid;
+		inode->i_atime = inode->i_mtime = inode->i_ctime
+				= CURRENT_TIME;
+		inode->i_private = data;
+		inode->i_fop = fops;
+	}
+	return inode;
+}
+
+/* created in the fs root directory, so non-renamable and non-linkable;
+ * the inode and dentry stay paired until device reconfig.
+ */
+static struct inode *
+gadgetfs_create_file (struct super_block *sb, char const *name,
+		void *data, const struct file_operations *fops,
+		struct dentry **dentry_p)
+{
+	struct dentry	*dentry;
+	struct inode	*inode;
+
+	dentry = d_alloc_name(sb->s_root, name);
+	if (!dentry)
+		return NULL;
+
+	inode = gadgetfs_make_inode (sb, data, fops,
+			S_IFREG | (default_perm & S_IRWXUGO));
+	if (!inode) {
+		dput(dentry);
+		return NULL;
+	}
+	d_add (dentry, inode);
+	*dentry_p = dentry;
+	return inode;
+}
+
+static const struct super_operations gadget_fs_operations = {
+	.statfs =	simple_statfs,
+	.drop_inode =	generic_delete_inode,
+};
+
+static int
+gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
+{
+	struct inode	*inode;
+	struct dev_data	*dev;
+
+	if (the_device)
+		return -ESRCH;
+
+	/* fake probe to determine $CHIP */
+	(void) usb_gadget_probe_driver(&probe_driver, gadgetfs_probe);
+	if (!CHIP)
+		return -ENODEV;
+
+	/* superblock */
+	sb->s_blocksize = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_magic = GADGETFS_MAGIC;
+	sb->s_op = &gadget_fs_operations;
+	sb->s_time_gran = 1;
+
+	/* root inode */
+	inode = gadgetfs_make_inode (sb,
+			NULL, &simple_dir_operations,
+			S_IFDIR | S_IRUGO | S_IXUGO);
+	if (!inode)
+		goto Enomem;
+	inode->i_op = &simple_dir_inode_operations;
+	if (!(sb->s_root = d_make_root (inode)))
+		goto Enomem;
+
+	/* the ep0 file is named after the controller we expect;
+	 * user mode code can use it for sanity checks, like we do.
+	 */
+	dev = dev_new ();
+	if (!dev)
+		goto Enomem;
+
+	dev->sb = sb;
+	if (!gadgetfs_create_file (sb, CHIP,
+				dev, &dev_init_operations,
+				&dev->dentry)) {
+		put_dev(dev);
+		goto Enomem;
+	}
+
+	/* other endpoint files are available after hardware setup,
+	 * from binding to a controller.
+	 */
+	the_device = dev;
+	return 0;
+
+Enomem:
+	return -ENOMEM;
+}
+
+/* "mount -t gadgetfs path /dev/gadget" ends up here */
+static struct dentry *
+gadgetfs_mount (struct file_system_type *t, int flags,
+		const char *path, void *opts)
+{
+	return mount_single (t, flags, opts, gadgetfs_fill_super);
+}
+
+static void
+gadgetfs_kill_sb (struct super_block *sb)
+{
+	kill_litter_super (sb);
+	if (the_device) {
+		put_dev (the_device);
+		the_device = NULL;
+	}
+}
+
+/*----------------------------------------------------------------------*/
+
+static struct file_system_type gadgetfs_type = {
+	.owner		= THIS_MODULE,
+	.name		= shortname,
+	.mount		= gadgetfs_mount,
+	.kill_sb	= gadgetfs_kill_sb,
+};
+
+/*----------------------------------------------------------------------*/
+
+static int __init init (void)
+{
+	int status;
+
+	status = register_filesystem (&gadgetfs_type);
+	if (status == 0)
+		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
+			shortname, driver_desc);
+	return status;
+}
+module_init (init);
+
+static void __exit cleanup (void)
+{
+	pr_debug ("unregister %s\n", shortname);
+	unregister_filesystem (&gadgetfs_type);
+}
+module_exit (cleanup);
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/langwell_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/langwell_udc.c
new file mode 100644
index 0000000..f9cedd5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/langwell_udc.c
@@ -0,0 +1,3434 @@
+/*
+ * Intel Langwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+
+/* #undef	DEBUG */
+/* #undef	VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/unaligned.h>
+
+#include "langwell_udc.h"
+
+
+#define	DRIVER_DESC		"Intel Langwell USB Device Controller driver"
+#define	DRIVER_VERSION		"16 May 2009"
+
+static const char driver_name[] = "langwell_udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+
+/* for endpoint 0 operations */
+static const struct usb_endpoint_descriptor
+langwell_ep0_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	0,
+	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
+};
+
+
+/*-------------------------------------------------------------------------*/
+/* debugging */
+
+#ifdef	VERBOSE_DEBUG
+static inline void print_all_registers(struct langwell_udc *dev)
+{
+	int	i;
+
+	/* Capability Registers */
+	dev_dbg(&dev->pdev->dev,
+		"Capability Registers (offset: 0x%04x, length: 0x%08x)\n",
+		CAP_REG_OFFSET, (u32)sizeof(struct langwell_cap_regs));
+	dev_dbg(&dev->pdev->dev, "caplength=0x%02x\n",
+			readb(&dev->cap_regs->caplength));
+	dev_dbg(&dev->pdev->dev, "hciversion=0x%04x\n",
+			readw(&dev->cap_regs->hciversion));
+	dev_dbg(&dev->pdev->dev, "hcsparams=0x%08x\n",
+			readl(&dev->cap_regs->hcsparams));
+	dev_dbg(&dev->pdev->dev, "hccparams=0x%08x\n",
+			readl(&dev->cap_regs->hccparams));
+	dev_dbg(&dev->pdev->dev, "dciversion=0x%04x\n",
+			readw(&dev->cap_regs->dciversion));
+	dev_dbg(&dev->pdev->dev, "dccparams=0x%08x\n",
+			readl(&dev->cap_regs->dccparams));
+
+	/* Operational Registers */
+	dev_dbg(&dev->pdev->dev,
+		"Operational Registers (offset: 0x%04x, length: 0x%08x)\n",
+		OP_REG_OFFSET, (u32)sizeof(struct langwell_op_regs));
+	dev_dbg(&dev->pdev->dev, "extsts=0x%08x\n",
+			readl(&dev->op_regs->extsts));
+	dev_dbg(&dev->pdev->dev, "extintr=0x%08x\n",
+			readl(&dev->op_regs->extintr));
+	dev_dbg(&dev->pdev->dev, "usbcmd=0x%08x\n",
+			readl(&dev->op_regs->usbcmd));
+	dev_dbg(&dev->pdev->dev, "usbsts=0x%08x\n",
+			readl(&dev->op_regs->usbsts));
+	dev_dbg(&dev->pdev->dev, "usbintr=0x%08x\n",
+			readl(&dev->op_regs->usbintr));
+	dev_dbg(&dev->pdev->dev, "frindex=0x%08x\n",
+			readl(&dev->op_regs->frindex));
+	dev_dbg(&dev->pdev->dev, "ctrldssegment=0x%08x\n",
+			readl(&dev->op_regs->ctrldssegment));
+	dev_dbg(&dev->pdev->dev, "deviceaddr=0x%08x\n",
+			readl(&dev->op_regs->deviceaddr));
+	dev_dbg(&dev->pdev->dev, "endpointlistaddr=0x%08x\n",
+			readl(&dev->op_regs->endpointlistaddr));
+	dev_dbg(&dev->pdev->dev, "ttctrl=0x%08x\n",
+			readl(&dev->op_regs->ttctrl));
+	dev_dbg(&dev->pdev->dev, "burstsize=0x%08x\n",
+			readl(&dev->op_regs->burstsize));
+	dev_dbg(&dev->pdev->dev, "txfilltuning=0x%08x\n",
+			readl(&dev->op_regs->txfilltuning));
+	dev_dbg(&dev->pdev->dev, "txttfilltuning=0x%08x\n",
+			readl(&dev->op_regs->txttfilltuning));
+	dev_dbg(&dev->pdev->dev, "ic_usb=0x%08x\n",
+			readl(&dev->op_regs->ic_usb));
+	dev_dbg(&dev->pdev->dev, "ulpi_viewport=0x%08x\n",
+			readl(&dev->op_regs->ulpi_viewport));
+	dev_dbg(&dev->pdev->dev, "configflag=0x%08x\n",
+			readl(&dev->op_regs->configflag));
+	dev_dbg(&dev->pdev->dev, "portsc1=0x%08x\n",
+			readl(&dev->op_regs->portsc1));
+	dev_dbg(&dev->pdev->dev, "devlc=0x%08x\n",
+			readl(&dev->op_regs->devlc));
+	dev_dbg(&dev->pdev->dev, "otgsc=0x%08x\n",
+			readl(&dev->op_regs->otgsc));
+	dev_dbg(&dev->pdev->dev, "usbmode=0x%08x\n",
+			readl(&dev->op_regs->usbmode));
+	dev_dbg(&dev->pdev->dev, "endptnak=0x%08x\n",
+			readl(&dev->op_regs->endptnak));
+	dev_dbg(&dev->pdev->dev, "endptnaken=0x%08x\n",
+			readl(&dev->op_regs->endptnaken));
+	dev_dbg(&dev->pdev->dev, "endptsetupstat=0x%08x\n",
+			readl(&dev->op_regs->endptsetupstat));
+	dev_dbg(&dev->pdev->dev, "endptprime=0x%08x\n",
+			readl(&dev->op_regs->endptprime));
+	dev_dbg(&dev->pdev->dev, "endptflush=0x%08x\n",
+			readl(&dev->op_regs->endptflush));
+	dev_dbg(&dev->pdev->dev, "endptstat=0x%08x\n",
+			readl(&dev->op_regs->endptstat));
+	dev_dbg(&dev->pdev->dev, "endptcomplete=0x%08x\n",
+			readl(&dev->op_regs->endptcomplete));
+
+	for (i = 0; i < dev->ep_max / 2; i++) {
+		dev_dbg(&dev->pdev->dev, "endptctrl[%d]=0x%08x\n",
+				i, readl(&dev->op_regs->endptctrl[i]));
+	}
+}
+#else
+
+#define	print_all_registers(dev)	do { } while (0)
+
+#endif /* VERBOSE_DEBUG */
+
+
+/*-------------------------------------------------------------------------*/
+
+#define	is_in(ep)	(((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir ==	\
+			USB_DIR_IN) : (usb_endpoint_dir_in((ep)->desc)))
+
+#define	DIR_STRING(ep)	(is_in(ep) ? "in" : "out")
+
+
+static char *type_string(const struct usb_endpoint_descriptor *desc)
+{
+	switch (usb_endpoint_type(desc)) {
+	case USB_ENDPOINT_XFER_BULK:
+		return "bulk";
+	case USB_ENDPOINT_XFER_ISOC:
+		return "iso";
+	case USB_ENDPOINT_XFER_INT:
+		return "int";
+	}
+
+	return "control";
+}
+
+
+/* configure endpoint control registers */
+static void ep_reset(struct langwell_ep *ep, unsigned char ep_num,
+		unsigned char is_in, unsigned char ep_type)
+{
+	struct langwell_udc	*dev;
+	u32			endptctrl;
+
+	dev = ep->dev;
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+	if (is_in) {	/* TX */
+		if (ep_num)
+			endptctrl |= EPCTRL_TXR;
+		endptctrl |= EPCTRL_TXE;
+		endptctrl |= ep_type << EPCTRL_TXT_SHIFT;
+	} else {	/* RX */
+		if (ep_num)
+			endptctrl |= EPCTRL_RXR;
+		endptctrl |= EPCTRL_RXE;
+		endptctrl |= ep_type << EPCTRL_RXT_SHIFT;
+	}
+
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* reset ep0 dQH and endptctrl */
+static void ep0_reset(struct langwell_udc *dev)
+{
+	struct langwell_ep	*ep;
+	int			i;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* ep0 in and out */
+	for (i = 0; i < 2; i++) {
+		ep = &dev->ep[i];
+		ep->dev = dev;
+
+		/* ep0 dQH */
+		ep->dqh = &dev->ep_dqh[i];
+
+		/* configure ep0 endpoint capabilities in dQH */
+		ep->dqh->dqh_ios = 1;
+		ep->dqh->dqh_mpl = EP0_MAX_PKT_SIZE;
+
+		/* enable ep0-in HW zero length termination select */
+		if (is_in(ep))
+			ep->dqh->dqh_zlt = 0;
+		ep->dqh->dqh_mult = 0;
+
+		ep->dqh->dtd_next = DTD_TERM;
+
+		/* configure ep0 control registers */
+		ep_reset(&dev->ep[0], 0, i, USB_ENDPOINT_XFER_CONTROL);
+	}
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoints operations */
+
+/* configure endpoint, making it usable */
+static int langwell_ep_enable(struct usb_ep *_ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct langwell_udc	*dev;
+	struct langwell_ep	*ep;
+	u16			max = 0;
+	unsigned long		flags;
+	int			i, retval = 0;
+	unsigned char		zlt, ios = 0, mult = 0;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if (!_ep || !desc || ep->desc
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	max = usb_endpoint_maxp(desc);
+
+	/*
+	 * disable HW zero length termination select
+	 * driver handles zero length packet through req->req.zero
+	 */
+	zlt = 1;
+
+	/*
+	 * sanity check type, direction, address, and then
+	 * initialize the endpoint capabilities fields in dQH
+	 */
+	switch (usb_endpoint_type(desc)) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		ios = 1;
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		if ((dev->gadget.speed == USB_SPEED_HIGH
+					&& max != 512)
+				|| (dev->gadget.speed == USB_SPEED_FULL
+					&& max > 64)) {
+			goto done;
+		}
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
+			goto done;
+
+		switch (dev->gadget.speed) {
+		case USB_SPEED_HIGH:
+			if (max <= 1024)
+				break;
+		case USB_SPEED_FULL:
+			if (max <= 64)
+				break;
+		default:
+			if (max <= 8)
+				break;
+			goto done;
+		}
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (strstr(ep->ep.name, "-bulk")
+				|| strstr(ep->ep.name, "-int"))
+			goto done;
+
+		switch (dev->gadget.speed) {
+		case USB_SPEED_HIGH:
+			if (max <= 1024)
+				break;
+		case USB_SPEED_FULL:
+			if (max <= 1023)
+				break;
+		default:
+			goto done;
+		}
+		/*
+		 * FIXME:
+		 * calculate transactions needed for high bandwidth iso
+		 */
+		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
+		max = max & 0x7ff;	/* bits 0~10 */
+		/* 3 transactions at most */
+		if (mult > 3)
+			goto done;
+		break;
+	default:
+		goto done;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	ep->ep.maxpacket = max;
+	ep->desc = desc;
+	ep->stopped = 0;
+	ep->ep_num = usb_endpoint_num(desc);
+
+	/* ep_type */
+	ep->ep_type = usb_endpoint_type(desc);
+
+	/* configure endpoint control registers */
+	ep_reset(ep, ep->ep_num, is_in(ep), ep->ep_type);
+
+	/* configure endpoint capabilities in dQH */
+	i = ep->ep_num * 2 + is_in(ep);
+	ep->dqh = &dev->ep_dqh[i];
+	ep->dqh->dqh_ios = ios;
+	ep->dqh->dqh_mpl = cpu_to_le16(max);
+	ep->dqh->dqh_zlt = zlt;
+	ep->dqh->dqh_mult = mult;
+	ep->dqh->dtd_next = DTD_TERM;
+
+	dev_dbg(&dev->pdev->dev, "enabled %s (ep%d%s-%s), max %04x\n",
+			_ep->name,
+			ep->ep_num,
+			DIR_STRING(ep),
+			type_string(desc),
+			max);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+done:
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return retval;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* retire a request */
+static void done(struct langwell_ep *ep, struct langwell_request *req,
+		int status)
+{
+	struct langwell_udc	*dev = ep->dev;
+	unsigned		stopped = ep->stopped;
+	struct langwell_dtd	*curr_dtd, *next_dtd;
+	int			i;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* remove the req from ep->queue */
+	list_del_init(&req->queue);
+
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	/* free dTD for the request */
+	next_dtd = req->head;
+	for (i = 0; i < req->dtd_count; i++) {
+		curr_dtd = next_dtd;
+		if (i != req->dtd_count - 1)
+			next_dtd = curr_dtd->next_dtd_virt;
+		dma_pool_free(dev->dtd_pool, curr_dtd, curr_dtd->dtd_dma);
+	}
+
+	usb_gadget_unmap_request(&dev->gadget, &req->req, is_in(ep));
+
+	if (status != -ESHUTDOWN)
+		dev_dbg(&dev->pdev->dev,
+				"complete %s, req %p, stat %d, len %u/%u\n",
+				ep->ep.name, &req->req, status,
+				req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+
+	spin_unlock(&dev->lock);
+	/* complete routine from gadget driver */
+	if (req->req.complete)
+		req->req.complete(&ep->ep, &req->req);
+
+	spin_lock(&dev->lock);
+	ep->stopped = stopped;
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+static void langwell_ep_fifo_flush(struct usb_ep *_ep);
+
+/* delete all endpoint requests, called with spinlock held */
+static void nuke(struct langwell_ep *ep, int status)
+{
+	/* called with spinlock held */
+	ep->stopped = 1;
+
+	/* endpoint fifo flush */
+	if (ep->desc)
+		langwell_ep_fifo_flush(&ep->ep);
+
+	while (!list_empty(&ep->queue)) {
+		struct langwell_request	*req = NULL;
+		req = list_entry(ep->queue.next, struct langwell_request,
+				queue);
+		done(ep, req, status);
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoint is no longer usable */
+static int langwell_ep_disable(struct usb_ep *_ep)
+{
+	struct langwell_ep	*ep;
+	unsigned long		flags;
+	struct langwell_udc	*dev;
+	int			ep_num;
+	u32			endptctrl;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if (!_ep || !ep->desc)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* disable endpoint control register */
+	ep_num = ep->ep_num;
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+	if (is_in(ep))
+		endptctrl &= ~EPCTRL_TXE;
+	else
+		endptctrl &= ~EPCTRL_RXE;
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	/* nuke all pending requests (does flush) */
+	nuke(ep, -ESHUTDOWN);
+
+	ep->desc = NULL;
+	ep->ep.desc = NULL;
+	ep->stopped = 1;
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	dev_dbg(&dev->pdev->dev, "disabled %s\n", _ep->name);
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+
+	return 0;
+}
+
+
+/* allocate a request object to use with this endpoint */
+static struct usb_request *langwell_alloc_request(struct usb_ep *_ep,
+		gfp_t gfp_flags)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	struct langwell_request	*req = NULL;
+
+	if (!_ep)
+		return NULL;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	req = kzalloc(sizeof(*req), gfp_flags);
+	if (!req)
+		return NULL;
+
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD(&req->queue);
+
+	dev_vdbg(&dev->pdev->dev, "alloc request for %s\n", _ep->name);
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return &req->req;
+}
+
+
+/* free a request object */
+static void langwell_free_request(struct usb_ep *_ep,
+		struct usb_request *_req)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	struct langwell_request	*req = NULL;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if (!_ep || !_req)
+		return;
+
+	req = container_of(_req, struct langwell_request, req);
+	WARN_ON(!list_empty(&req->queue));
+
+	if (_req)
+		kfree(req);
+
+	dev_vdbg(&dev->pdev->dev, "free request for %s\n", _ep->name);
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* queue dTD and PRIME endpoint */
+static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
+{
+	u32			bit_mask, usbcmd, endptstat, dtd_dma;
+	u8			dtd_status;
+	int			i;
+	struct langwell_dqh	*dqh;
+	struct langwell_udc	*dev;
+
+	dev = ep->dev;
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	i = ep->ep_num * 2 + is_in(ep);
+	dqh = &dev->ep_dqh[i];
+
+	if (ep->ep_num)
+		dev_vdbg(&dev->pdev->dev, "%s\n", ep->name);
+	else
+		/* ep0 */
+		dev_vdbg(&dev->pdev->dev, "%s-%s\n", ep->name, DIR_STRING(ep));
+
+	dev_vdbg(&dev->pdev->dev, "ep_dqh[%d] addr: 0x%p\n",
+			i, &(dev->ep_dqh[i]));
+
+	bit_mask = is_in(ep) ?
+		(1 << (ep->ep_num + 16)) : (1 << (ep->ep_num));
+
+	dev_vdbg(&dev->pdev->dev, "bit_mask = 0x%08x\n", bit_mask);
+
+	/* check if the pipe is empty */
+	if (!(list_empty(&ep->queue))) {
+		/* add dTD to the end of linked list */
+		struct langwell_request	*lastreq;
+		lastreq = list_entry(ep->queue.prev,
+				struct langwell_request, queue);
+
+		lastreq->tail->dtd_next =
+			cpu_to_le32(req->head->dtd_dma & DTD_NEXT_MASK);
+
+		/* read prime bit, if 1 goto out */
+		if (readl(&dev->op_regs->endptprime) & bit_mask)
+			goto out;
+
+		do {
+			/* set ATDTW bit in USBCMD */
+			usbcmd = readl(&dev->op_regs->usbcmd);
+			writel(usbcmd | CMD_ATDTW, &dev->op_regs->usbcmd);
+
+			/* read correct status bit */
+			endptstat = readl(&dev->op_regs->endptstat) & bit_mask;
+
+		} while (!(readl(&dev->op_regs->usbcmd) & CMD_ATDTW));
+
+		/* write ATDTW bit to 0 */
+		usbcmd = readl(&dev->op_regs->usbcmd);
+		writel(usbcmd & ~CMD_ATDTW, &dev->op_regs->usbcmd);
+
+		if (endptstat)
+			goto out;
+	}
+
+	/* write dQH next pointer and terminate bit to 0 */
+	dtd_dma = req->head->dtd_dma & DTD_NEXT_MASK;
+	dqh->dtd_next = cpu_to_le32(dtd_dma);
+
+	/* clear active and halt bit */
+	dtd_status = (u8) ~(DTD_STS_ACTIVE | DTD_STS_HALTED);
+	dqh->dtd_status &= dtd_status;
+	dev_vdbg(&dev->pdev->dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status);
+
+	/* ensure that updates to the dQH will occur before priming */
+	wmb();
+
+	/* write 1 to endptprime register to PRIME endpoint */
+	bit_mask = is_in(ep) ? (1 << (ep->ep_num + 16)) : (1 << ep->ep_num);
+	dev_vdbg(&dev->pdev->dev, "endprime bit_mask = 0x%08x\n", bit_mask);
+	writel(bit_mask, &dev->op_regs->endptprime);
+out:
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* fill in the dTD structure to build a transfer descriptor */
+static struct langwell_dtd *build_dtd(struct langwell_request *req,
+		unsigned *length, dma_addr_t *dma, int *is_last)
+{
+	u32			 buf_ptr;
+	struct langwell_dtd	*dtd;
+	struct langwell_udc	*dev;
+	int			i;
+
+	dev = req->ep->dev;
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* the maximum transfer length, up to 16k bytes */
+	*length = min(req->req.length - req->req.actual,
+			(unsigned)DTD_MAX_TRANSFER_LENGTH);
+
+	/* create dTD dma_pool resource */
+	dtd = dma_pool_alloc(dev->dtd_pool, GFP_KERNEL, dma);
+	if (dtd == NULL)
+		return dtd;
+	dtd->dtd_dma = *dma;
+
+	/* initialize buffer page pointers */
+	buf_ptr = (u32)(req->req.dma + req->req.actual);
+	for (i = 0; i < 5; i++)
+		dtd->dtd_buf[i] = cpu_to_le32(buf_ptr + i * PAGE_SIZE);
+
+	req->req.actual += *length;
+
+	/* fill in total bytes with transfer size */
+	dtd->dtd_total = cpu_to_le16(*length);
+	dev_vdbg(&dev->pdev->dev, "dtd->dtd_total = %d\n", dtd->dtd_total);
+
+	/* decide whether this is the last dTD, honoring req->req.zero */
+	if (req->req.zero) {
+		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+			*is_last = 1;
+		else
+			*is_last = 0;
+	} else if (req->req.length == req->req.actual) {
+		*is_last = 1;
+	} else
+		*is_last = 0;
+
+	if (*is_last == 0)
+		dev_vdbg(&dev->pdev->dev, "multi-dtd request!\n");
+
+	/* set interrupt on complete bit for the last dTD */
+	if (*is_last && !req->req.no_interrupt)
+		dtd->dtd_ioc = 1;
+
+	/* set multiplier override 0 for non-ISO and non-TX endpoint */
+	dtd->dtd_multo = 0;
+
+	/* set the active bit of status field to 1 */
+	dtd->dtd_status = DTD_STS_ACTIVE;
+	dev_vdbg(&dev->pdev->dev, "dtd->dtd_status = 0x%02x\n",
+			dtd->dtd_status);
+
+	dev_vdbg(&dev->pdev->dev, "length = %d, dma addr= 0x%08x\n",
+			*length, (int)*dma);
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return dtd;
+}
+
+
+/* generate dTD linked list for a request */
+static int req_to_dtd(struct langwell_request *req)
+{
+	unsigned		count;
+	int			is_last, is_first = 1;
+	struct langwell_dtd	*dtd, *last_dtd = NULL;
+	struct langwell_udc	*dev;
+	dma_addr_t		dma;
+
+	dev = req->ep->dev;
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+	do {
+		dtd = build_dtd(req, &count, &dma, &is_last);
+		if (dtd == NULL)
+			return -ENOMEM;
+
+		if (is_first) {
+			is_first = 0;
+			req->head = dtd;
+		} else {
+			last_dtd->dtd_next = cpu_to_le32(dma);
+			last_dtd->next_dtd_virt = dtd;
+		}
+		last_dtd = dtd;
+		req->dtd_count++;
+	} while (!is_last);
+
+	/* set terminate bit to 1 for the last dTD */
+	dtd->dtd_next = DTD_TERM;
+
+	req->tail = dtd;
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* queue (submits) an I/O request to an endpoint */
+static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+		gfp_t gfp_flags)
+{
+	struct langwell_request	*req;
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	unsigned long		flags;
+	int			is_iso = 0;
+	int			ret;
+
+	/* always require a cpu-view buffer */
+	req = container_of(_req, struct langwell_request, req);
+	ep = container_of(_ep, struct langwell_ep, ep);
+
+	if (!_req || !_req->complete || !_req->buf
+			|| !list_empty(&req->queue)) {
+		return -EINVAL;
+	}
+
+	if (unlikely(!_ep || !ep->desc))
+		return -EINVAL;
+
+	dev = ep->dev;
+	req->ep = ep;
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if (usb_endpoint_xfer_isoc(ep->desc)) {
+		if (req->req.length > ep->ep.maxpacket)
+			return -EMSGSIZE;
+		is_iso = 1;
+	}
+
+	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+
+	/* set up dma mapping */
+	ret = usb_gadget_map_request(&dev->gadget, &req->req, is_in(ep));
+	if (ret)
+		return ret;
+
+	dev_dbg(&dev->pdev->dev,
+			"%s queue req %p, len %u, buf %p, dma 0x%08x\n",
+			_ep->name,
+			_req, _req->length, _req->buf, (int)_req->dma);
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+	req->dtd_count = 0;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* build and put dTDs to endpoint queue */
+	if (!req_to_dtd(req)) {
+		queue_dtd(ep, req);
+	} else {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -ENOMEM;
+	}
+
+	/* update ep0 state */
+	if (ep->ep_num == 0)
+		dev->ep0_state = DATA_STATE_XMIT;
+
+	if (likely(req != NULL)) {
+		list_add_tail(&req->queue, &ep->queue);
+		dev_vdbg(&dev->pdev->dev, "list_add_tail()\n");
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* dequeue (cancels, unlinks) an I/O request from an endpoint */
+static int langwell_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	struct langwell_request	*req;
+	unsigned long		flags;
+	int			stopped, ep_num, retval = 0;
+	u32			endptctrl;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if (!_ep || !ep->desc || !_req)
+		return -EINVAL;
+
+	if (!dev->driver)
+		return -ESHUTDOWN;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	stopped = ep->stopped;
+
+	/* quiesce dma while we patch the queue */
+	ep->stopped = 1;
+	ep_num = ep->ep_num;
+
+	/* disable endpoint control register */
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+	if (is_in(ep))
+		endptctrl &= ~EPCTRL_TXE;
+	else
+		endptctrl &= ~EPCTRL_RXE;
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	/* make sure it's still queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+
+	if (&req->req != _req) {
+		retval = -EINVAL;
+		goto done;
+	}
+
+	/* queue head may be partially complete. */
+	if (ep->queue.next == &req->queue) {
+		dev_dbg(&dev->pdev->dev, "unlink (%s) dma\n", _ep->name);
+		_req->status = -ECONNRESET;
+		langwell_ep_fifo_flush(&ep->ep);
+
+		/* not the last request in the endpoint queue? */
+		if (likely(req->queue.next != &ep->queue)) {
+			struct langwell_dqh	*dqh;
+			struct langwell_request	*next_req;
+
+			dqh = ep->dqh;
+			next_req = list_entry(req->queue.next,
+					struct langwell_request, queue);
+
+			/* point the dQH to the first dTD of next request */
+			writel((u32) next_req->head, &dqh->dqh_current);
+		}
+	} else {
+		struct langwell_request	*prev_req;
+
+		prev_req = list_entry(req->queue.prev,
+				struct langwell_request, queue);
+		writel(readl(&req->tail->dtd_next),
+				&prev_req->tail->dtd_next);
+	}
+
+	done(ep, req, -ECONNRESET);
+
+done:
+	/* enable endpoint again */
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+	if (is_in(ep))
+		endptctrl |= EPCTRL_TXE;
+	else
+		endptctrl |= EPCTRL_RXE;
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	ep->stopped = stopped;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return retval;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoint set/clear halt */
+static void ep_set_halt(struct langwell_ep *ep, int value)
+{
+	u32			endptctrl = 0;
+	int			ep_num;
+	struct langwell_udc	*dev = ep->dev;
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	ep_num = ep->ep_num;
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+
+	/* value: 1 - set halt, 0 - clear halt */
+	if (value) {
+		/* set the stall bit */
+		if (is_in(ep))
+			endptctrl |= EPCTRL_TXS;
+		else
+			endptctrl |= EPCTRL_RXS;
+	} else {
+		/* clear the stall bit and reset data toggle */
+		if (is_in(ep)) {
+			endptctrl &= ~EPCTRL_TXS;
+			endptctrl |= EPCTRL_TXR;
+		} else {
+			endptctrl &= ~EPCTRL_RXS;
+			endptctrl |= EPCTRL_RXR;
+		}
+	}
+
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* set the endpoint halt feature */
+static int langwell_ep_set_halt(struct usb_ep *_ep, int value)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	unsigned long		flags;
+	int			retval = 0;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if (!_ep || !ep->desc)
+		return -EINVAL;
+
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	if (usb_endpoint_xfer_isoc(ep->desc))
+		return  -EOPNOTSUPP;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/*
+	 * an attempt to halt an IN ep will fail if any transfer requests
+	 * are still queued
+	 */
+	if (!list_empty(&ep->queue) && is_in(ep) && value) {
+		/* IN endpoint FIFO holds bytes */
+		dev_dbg(&dev->pdev->dev, "%s FIFO holds bytes\n", _ep->name);
+		retval = -EAGAIN;
+		goto done;
+	}
+
+	/* endpoint set/clear halt */
+	if (ep->ep_num) {
+		ep_set_halt(ep, value);
+	} else { /* endpoint 0 */
+		dev->ep0_state = WAIT_FOR_SETUP;
+		dev->ep0_dir = USB_DIR_OUT;
+	}
+done:
+	spin_unlock_irqrestore(&dev->lock, flags);
+	dev_dbg(&dev->pdev->dev, "%s %s halt\n",
+			_ep->name, value ? "set" : "clear");
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return retval;
+}
+
+
+/* set the halt feature and ignore clear requests */
+static int langwell_ep_set_wedge(struct usb_ep *_ep)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if (!_ep || !ep->desc)
+		return -EINVAL;
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return usb_ep_set_halt(_ep);
+}
+
+
+/* flush contents of a fifo */
+static void langwell_ep_fifo_flush(struct usb_ep *_ep)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	u32			flush_bit;
+	unsigned long		timeout;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if (!_ep || !ep->desc) {
+		dev_vdbg(&dev->pdev->dev, "ep or ep->desc is NULL\n");
+		dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+		return;
+	}
+
+	dev_vdbg(&dev->pdev->dev, "%s-%s fifo flush\n",
+			_ep->name, DIR_STRING(ep));
+
+	/* flush endpoint buffer */
+	if (ep->ep_num == 0)
+		flush_bit = (1 << 16) | 1;
+	else if (is_in(ep))
+		flush_bit = 1 << (ep->ep_num + 16);	/* TX */
+	else
+		flush_bit = 1 << ep->ep_num;		/* RX */
+
+	/* wait until flush complete */
+	timeout = jiffies + FLUSH_TIMEOUT;
+	do {
+		writel(flush_bit, &dev->op_regs->endptflush);
+		while (readl(&dev->op_regs->endptflush)) {
+			if (time_after(jiffies, timeout)) {
+				dev_err(&dev->pdev->dev, "ep flush timeout\n");
+				goto done;
+			}
+			cpu_relax();
+		}
+	} while (readl(&dev->op_regs->endptstat) & flush_bit);
+done:
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* endpoints operations structure */
+static const struct usb_ep_ops langwell_ep_ops = {
+
+	/* configure endpoint, making it usable */
+	.enable		= langwell_ep_enable,
+
+	/* endpoint is no longer usable */
+	.disable	= langwell_ep_disable,
+
+	/* allocate a request object to use with this endpoint */
+	.alloc_request	= langwell_alloc_request,
+
+	/* free a request object */
+	.free_request	= langwell_free_request,
+
+	/* queue (submits) an I/O request to an endpoint */
+	.queue		= langwell_ep_queue,
+
+	/* dequeue (cancels, unlinks) an I/O request from an endpoint */
+	.dequeue	= langwell_ep_dequeue,
+
+	/* set the endpoint halt feature */
+	.set_halt	= langwell_ep_set_halt,
+
+	/* set the halt feature and ignore clear requests */
+	.set_wedge	= langwell_ep_set_wedge,
+
+	/* flush contents of a fifo */
+	.fifo_flush	= langwell_ep_fifo_flush,
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+/* device controller usb_gadget_ops structure */
+
+/* returns the current frame number */
+static int langwell_get_frame(struct usb_gadget *_gadget)
+{
+	struct langwell_udc	*dev;
+	u16			retval;
+
+	if (!_gadget)
+		return -ENODEV;
+
+	dev = container_of(_gadget, struct langwell_udc, gadget);
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	retval = readl(&dev->op_regs->frindex) & FRINDEX_MASK;
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return retval;
+}
+
+
+/* enter or exit PHY low power state */
+static void langwell_phy_low_power(struct langwell_udc *dev, bool flag)
+{
+	u32		devlc;
+	u8		devlc_byte2;
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	devlc = readl(&dev->op_regs->devlc);
+	dev_vdbg(&dev->pdev->dev, "devlc = 0x%08x\n", devlc);
+
+	if (flag)
+		devlc |= LPM_PHCD;
+	else
+		devlc &= ~LPM_PHCD;
+
+	/* FIXME: workaround for Langwell A1/A2/A3 sighting */
+	devlc_byte2 = (devlc >> 16) & 0xff;
+	writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
+
+	devlc = readl(&dev->op_regs->devlc);
+	dev_vdbg(&dev->pdev->dev,
+			"%s PHY low power suspend, devlc = 0x%08x\n",
+			flag ? "enter" : "exit", devlc);
+}
+
+
+/* tries to wake up the host connected to this gadget */
+static int langwell_wakeup(struct usb_gadget *_gadget)
+{
+	struct langwell_udc	*dev;
+	u32			portsc1;
+	unsigned long		flags;
+
+	if (!_gadget)
+		return 0;
+
+	dev = container_of(_gadget, struct langwell_udc, gadget);
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* remote wakeup feature not enabled by host */
+	if (!dev->remote_wakeup) {
+		dev_info(&dev->pdev->dev, "remote wakeup is disabled\n");
+		return -ENOTSUPP;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	portsc1 = readl(&dev->op_regs->portsc1);
+	if (!(portsc1 & PORTS_SUSP)) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return 0;
+	}
+
+	/* LPM L1 to L0 or legacy remote wakeup */
+	if (dev->lpm && dev->lpm_state == LPM_L1)
+		dev_info(&dev->pdev->dev, "LPM L1 to L0 remote wakeup\n");
+	else
+		dev_info(&dev->pdev->dev, "device remote wakeup\n");
+
+	/* exit PHY low power suspend */
+	if (dev->pdev->device != 0x0829)
+		langwell_phy_low_power(dev, 0);
+
+	/* force port resume */
+	portsc1 |= PORTS_FPR;
+	writel(portsc1, &dev->op_regs->portsc1);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* notify controller that VBUS is powered or not */
+static int langwell_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+	struct langwell_udc	*dev;
+	unsigned long		flags;
+	u32			usbcmd;
+
+	if (!_gadget)
+		return -ENODEV;
+
+	dev = container_of(_gadget, struct langwell_udc, gadget);
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev_vdbg(&dev->pdev->dev, "VBUS status: %s\n",
+			is_active ? "on" : "off");
+
+	dev->vbus_active = (is_active != 0);
+	if (dev->driver && dev->softconnected && dev->vbus_active) {
+		usbcmd = readl(&dev->op_regs->usbcmd);
+		usbcmd |= CMD_RUNSTOP;
+		writel(usbcmd, &dev->op_regs->usbcmd);
+	} else {
+		usbcmd = readl(&dev->op_regs->usbcmd);
+		usbcmd &= ~CMD_RUNSTOP;
+		writel(usbcmd, &dev->op_regs->usbcmd);
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* constrain controller's VBUS power usage */
+static int langwell_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+	struct langwell_udc	*dev;
+
+	if (!_gadget)
+		return -ENODEV;
+
+	dev = container_of(_gadget, struct langwell_udc, gadget);
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if (dev->transceiver) {
+		dev_vdbg(&dev->pdev->dev, "usb_phy_set_power\n");
+		dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+		return usb_phy_set_power(dev->transceiver, mA);
+	}
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return -ENOTSUPP;
+}
+
+
+/* D+ pullup, software-controlled connect/disconnect to USB host */
+static int langwell_pullup(struct usb_gadget *_gadget, int is_on)
+{
+	struct langwell_udc	*dev;
+	u32			usbcmd;
+	unsigned long		flags;
+
+	if (!_gadget)
+		return -ENODEV;
+
+	dev = container_of(_gadget, struct langwell_udc, gadget);
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->softconnected = (is_on != 0);
+
+	if (dev->driver && dev->softconnected && dev->vbus_active) {
+		usbcmd = readl(&dev->op_regs->usbcmd);
+		usbcmd |= CMD_RUNSTOP;
+		writel(usbcmd, &dev->op_regs->usbcmd);
+	} else {
+		usbcmd = readl(&dev->op_regs->usbcmd);
+		usbcmd &= ~CMD_RUNSTOP;
+		writel(usbcmd, &dev->op_regs->usbcmd);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+static int langwell_start(struct usb_gadget *g,
+		struct usb_gadget_driver *driver);
+
+static int langwell_stop(struct usb_gadget *g,
+		struct usb_gadget_driver *driver);
+
+/* device controller usb_gadget_ops structure */
+static const struct usb_gadget_ops langwell_ops = {
+
+	/* returns the current frame number */
+	.get_frame	= langwell_get_frame,
+
+	/* tries to wake up the host connected to this gadget */
+	.wakeup		= langwell_wakeup,
+
+	/* set the device selfpowered feature, always selfpowered */
+	/* .set_selfpowered = langwell_set_selfpowered, */
+
+	/* notify controller that VBUS is powered or not */
+	.vbus_session	= langwell_vbus_session,
+
+	/* constrain controller's VBUS power usage */
+	.vbus_draw	= langwell_vbus_draw,
+
+	/* D+ pullup, software-controlled connect/disconnect to USB host */
+	.pullup		= langwell_pullup,
+
+	.udc_start	= langwell_start,
+	.udc_stop	= langwell_stop,
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+/* device controller operations */
+
+/* reset device controller */
+static int langwell_udc_reset(struct langwell_udc *dev)
+{
+	u32		usbcmd, usbmode, devlc, endpointlistaddr;
+	u8		devlc_byte0, devlc_byte2;
+	unsigned long	timeout;
+
+	if (!dev)
+		return -EINVAL;
+
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* set controller to stop state */
+	usbcmd = readl(&dev->op_regs->usbcmd);
+	usbcmd &= ~CMD_RUNSTOP;
+	writel(usbcmd, &dev->op_regs->usbcmd);
+
+	/* reset device controller */
+	usbcmd = readl(&dev->op_regs->usbcmd);
+	usbcmd |= CMD_RST;
+	writel(usbcmd, &dev->op_regs->usbcmd);
+
+	/* wait for reset to complete */
+	timeout = jiffies + RESET_TIMEOUT;
+	while (readl(&dev->op_regs->usbcmd) & CMD_RST) {
+		if (time_after(jiffies, timeout)) {
+			dev_err(&dev->pdev->dev, "device reset timeout\n");
+			return -ETIMEDOUT;
+		}
+		cpu_relax();
+	}
+
+	/* set controller to device mode */
+	usbmode = readl(&dev->op_regs->usbmode);
+	usbmode |= MODE_DEVICE;
+
+	/* turn setup lockout off, require setup tripwire in usbcmd */
+	usbmode |= MODE_SLOM;
+
+	writel(usbmode, &dev->op_regs->usbmode);
+	usbmode = readl(&dev->op_regs->usbmode);
+	dev_vdbg(&dev->pdev->dev, "usbmode=0x%08x\n", usbmode);
+
+	/* Write-Clear setup status */
+	writel(0, &dev->op_regs->usbsts);
+
+	/* if USB LPM is supported, ACK all LPM tokens */
+	if (dev->lpm) {
+		devlc = readl(&dev->op_regs->devlc);
+		dev_vdbg(&dev->pdev->dev, "devlc = 0x%08x\n", devlc);
+		/* FIXME: workaround for Langwell A1/A2/A3 sighting */
+		devlc &= ~LPM_STL;	/* don't STALL LPM token */
+		devlc &= ~LPM_NYT_ACK;	/* ACK LPM token */
+		devlc_byte0 = devlc & 0xff;
+		devlc_byte2 = (devlc >> 16) & 0xff;
+		writeb(devlc_byte0, (u8 *)&dev->op_regs->devlc);
+		writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
+		devlc = readl(&dev->op_regs->devlc);
+		dev_vdbg(&dev->pdev->dev,
+				"ACK LPM token, devlc = 0x%08x\n", devlc);
+	}
+
+	/* fill endpointlistaddr register */
+	endpointlistaddr = dev->ep_dqh_dma;
+	endpointlistaddr &= ENDPOINTLISTADDR_MASK;
+	writel(endpointlistaddr, &dev->op_regs->endpointlistaddr);
+
+	dev_vdbg(&dev->pdev->dev,
+		"dQH base (vir: %p, phy: 0x%08x), endpointlistaddr=0x%08x\n",
+		dev->ep_dqh, endpointlistaddr,
+		readl(&dev->op_regs->endpointlistaddr));
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* reinitialize device controller endpoints */
+static int eps_reinit(struct langwell_udc *dev)
+{
+	struct langwell_ep	*ep;
+	char			name[14];
+	int			i;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* initialize ep0 */
+	ep = &dev->ep[0];
+	ep->dev = dev;
+	strncpy(ep->name, "ep0", sizeof(ep->name));
+	ep->ep.name = ep->name;
+	ep->ep.ops = &langwell_ep_ops;
+	ep->stopped = 0;
+	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
+	ep->ep_num = 0;
+	ep->desc = &langwell_ep0_desc;
+	INIT_LIST_HEAD(&ep->queue);
+
+	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
+
+	/* initialize other endpoints */
+	for (i = 2; i < dev->ep_max; i++) {
+		ep = &dev->ep[i];
+		if (i % 2)
+			snprintf(name, sizeof(name), "ep%din", i / 2);
+		else
+			snprintf(name, sizeof(name), "ep%dout", i / 2);
+		ep->dev = dev;
+		strncpy(ep->name, name, sizeof(ep->name));
+		ep->ep.name = ep->name;
+
+		ep->ep.ops = &langwell_ep_ops;
+		ep->stopped = 0;
+		ep->ep.maxpacket = (unsigned short) ~0;
+		ep->ep_num = i / 2;
+
+		INIT_LIST_HEAD(&ep->queue);
+		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+	}
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* enable interrupt and set controller to run state */
+static void langwell_udc_start(struct langwell_udc *dev)
+{
+	u32	usbintr, usbcmd;
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* enable interrupts */
+	usbintr = INTR_ULPIE	/* ULPI */
+		| INTR_SLE	/* suspend */
+		/* | INTR_SRE	SOF received */
+		| INTR_URE	/* USB reset */
+		| INTR_AAE	/* async advance */
+		| INTR_SEE	/* system error */
+		| INTR_FRE	/* frame list rollover */
+		| INTR_PCE	/* port change detect */
+		| INTR_UEE	/* USB error interrupt */
+		| INTR_UE;	/* USB interrupt */
+	writel(usbintr, &dev->op_regs->usbintr);
+
+	/* clear stopped bit */
+	dev->stopped = 0;
+
+	/* set controller to run */
+	usbcmd = readl(&dev->op_regs->usbcmd);
+	usbcmd |= CMD_RUNSTOP;
+	writel(usbcmd, &dev->op_regs->usbcmd);
+
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* disable interrupt and set controller to stop state */
+static void langwell_udc_stop(struct langwell_udc *dev)
+{
+	u32	usbcmd;
+
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* disable all interrupts */
+	writel(0, &dev->op_regs->usbintr);
+
+	/* set stopped bit */
+	dev->stopped = 1;
+
+	/* set controller to stop state */
+	usbcmd = readl(&dev->op_regs->usbcmd);
+	usbcmd &= ~CMD_RUNSTOP;
+	writel(usbcmd, &dev->op_regs->usbcmd);
+
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* stop all USB activities */
+static void stop_activity(struct langwell_udc *dev)
+{
+	struct langwell_ep	*ep;
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	nuke(&dev->ep[0], -ESHUTDOWN);
+
+	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+		nuke(ep, -ESHUTDOWN);
+	}
+
+	/* report disconnect; the driver is already quiesced */
+	if (dev->driver) {
+		spin_unlock(&dev->lock);
+		dev->driver->disconnect(&dev->gadget);
+		spin_lock(&dev->lock);
+	}
+
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* device "function" sysfs attribute file */
+static ssize_t show_function(struct device *_dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct langwell_udc	*dev = dev_get_drvdata(_dev);
+
+	if (!dev->driver || !dev->driver->function
+			|| strlen(dev->driver->function) > PAGE_SIZE)
+		return 0;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
+}
+static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
+
+
+static inline enum usb_device_speed lpm_device_speed(u32 reg)
+{
+	switch (LPM_PSPD(reg)) {
+	case LPM_SPEED_HIGH:
+		return USB_SPEED_HIGH;
+	case LPM_SPEED_FULL:
+		return USB_SPEED_FULL;
+	case LPM_SPEED_LOW:
+		return USB_SPEED_LOW;
+	default:
+		return USB_SPEED_UNKNOWN;
+	}
+}
+
+/* device "langwell_udc" sysfs attribute file */
+static ssize_t show_langwell_udc(struct device *_dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct langwell_udc	*dev = dev_get_drvdata(_dev);
+	struct langwell_request *req;
+	struct langwell_ep	*ep = NULL;
+	char			*next;
+	unsigned		size;
+	unsigned		t;
+	unsigned		i;
+	unsigned long		flags;
+	u32			tmp_reg;
+
+	next = buf;
+	size = PAGE_SIZE;
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* driver basic information */
+	t = scnprintf(next, size,
+			DRIVER_DESC "\n"
+			"%s version: %s\n"
+			"Gadget driver: %s\n\n",
+			driver_name, DRIVER_VERSION,
+			dev->driver ? dev->driver->driver.name : "(none)");
+	size -= t;
+	next += t;
+
+	/* device registers */
+	tmp_reg = readl(&dev->op_regs->usbcmd);
+	t = scnprintf(next, size,
+			"USBCMD reg:\n"
+			"SetupTW: %d\n"
+			"Run/Stop: %s\n\n",
+			(tmp_reg & CMD_SUTW) ? 1 : 0,
+			(tmp_reg & CMD_RUNSTOP) ? "Run" : "Stop");
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->usbsts);
+	t = scnprintf(next, size,
+			"USB Status Reg:\n"
+			"Device Suspend: %d\n"
+			"Reset Received: %d\n"
+			"System Error: %s\n"
+			"USB Error Interrupt: %s\n\n",
+			(tmp_reg & STS_SLI) ? 1 : 0,
+			(tmp_reg & STS_URI) ? 1 : 0,
+			(tmp_reg & STS_SEI) ? "Error" : "No error",
+			(tmp_reg & STS_UEI) ? "Error detected" : "No error");
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->usbintr);
+	t = scnprintf(next, size,
+			"USB Interrupt Enable Reg:\n"
+			"Sleep Enable: %d\n"
+			"SOF Received Enable: %d\n"
+			"Reset Enable: %d\n"
+			"System Error Enable: %d\n"
+			"Port Change Detected Enable: %d\n"
+			"USB Error Intr Enable: %d\n"
+			"USB Intr Enable: %d\n\n",
+			(tmp_reg & INTR_SLE) ? 1 : 0,
+			(tmp_reg & INTR_SRE) ? 1 : 0,
+			(tmp_reg & INTR_URE) ? 1 : 0,
+			(tmp_reg & INTR_SEE) ? 1 : 0,
+			(tmp_reg & INTR_PCE) ? 1 : 0,
+			(tmp_reg & INTR_UEE) ? 1 : 0,
+			(tmp_reg & INTR_UE) ? 1 : 0);
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->frindex);
+	t = scnprintf(next, size,
+			"USB Frame Index Reg:\n"
+			"Frame Number is 0x%08x\n\n",
+			(tmp_reg & FRINDEX_MASK));
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->deviceaddr);
+	t = scnprintf(next, size,
+			"USB Device Address Reg:\n"
+			"Device Addr is 0x%x\n\n",
+			USBADR(tmp_reg));
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->endpointlistaddr);
+	t = scnprintf(next, size,
+			"USB Endpoint List Address Reg:\n"
+			"Endpoint List Pointer is 0x%x\n\n",
+			EPBASE(tmp_reg));
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->portsc1);
+	t = scnprintf(next, size,
+		"USB Port Status & Control Reg:\n"
+		"Port Reset: %s\n"
+		"Port Suspend Mode: %s\n"
+		"Over-current Change: %s\n"
+		"Port Enable/Disable Change: %s\n"
+		"Port Enabled/Disabled: %s\n"
+		"Current Connect Status: %s\n"
+		"LPM Suspend Status: %s\n\n",
+		(tmp_reg & PORTS_PR) ? "Reset" : "Not Reset",
+		(tmp_reg & PORTS_SUSP) ? "Suspended" : "Not Suspended",
+		(tmp_reg & PORTS_OCC) ? "Detected" : "No",
+		(tmp_reg & PORTS_PEC) ? "Changed" : "Not Changed",
+		(tmp_reg & PORTS_PE) ? "Enabled" : "Disabled",
+		(tmp_reg & PORTS_CCS) ?  "Attached" : "Not Attached",
+		(tmp_reg & PORTS_SLP) ? "LPM L1" : "LPM L0");
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->devlc);
+	t = scnprintf(next, size,
+		"Device LPM Control Reg:\n"
+		"Parallel Transceiver : %d\n"
+		"Serial Transceiver : %d\n"
+		"Port Speed: %s\n"
+		"Port Force Full Speed Connect: %s\n"
+		"PHY Low Power Suspend Clock: %s\n"
+		"BmAttributes: %d\n\n",
+		LPM_PTS(tmp_reg),
+		(tmp_reg & LPM_STS) ? 1 : 0,
+		usb_speed_string(lpm_device_speed(tmp_reg)),
+		(tmp_reg & LPM_PFSC) ? "Force Full Speed" : "Not Force",
+		(tmp_reg & LPM_PHCD) ? "Disabled" : "Enabled",
+		LPM_BA(tmp_reg));
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->usbmode);
+	t = scnprintf(next, size,
+			"USB Mode Reg:\n"
+			"Controller Mode is : %s\n\n", ({
+				char *s;
+				switch (MODE_CM(tmp_reg)) {
+				case MODE_IDLE:
+					s = "Idle"; break;
+				case MODE_DEVICE:
+					s = "Device Controller"; break;
+				case MODE_HOST:
+					s = "Host Controller"; break;
+				default:
+					s = "None"; break;
+				}
+				s;
+			}));
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->endptsetupstat);
+	t = scnprintf(next, size,
+			"Endpoint Setup Status Reg:\n"
+			"SETUP on ep 0x%04x\n\n",
+			tmp_reg & SETUPSTAT_MASK);
+	size -= t;
+	next += t;
+
+	for (i = 0; i < dev->ep_max / 2; i++) {
+		tmp_reg = readl(&dev->op_regs->endptctrl[i]);
+		t = scnprintf(next, size, "EP Ctrl Reg [%d]: 0x%08x\n",
+				i, tmp_reg);
+		size -= t;
+		next += t;
+	}
+	tmp_reg = readl(&dev->op_regs->endptprime);
+	t = scnprintf(next, size, "EP Prime Reg: 0x%08x\n\n", tmp_reg);
+	size -= t;
+	next += t;
+
+	/* langwell_udc, langwell_ep, langwell_request structure information */
+	ep = &dev->ep[0];
+	t = scnprintf(next, size, "%s MaxPacketSize: 0x%x, ep_num: %d\n",
+			ep->ep.name, ep->ep.maxpacket, ep->ep_num);
+	size -= t;
+	next += t;
+
+	if (list_empty(&ep->queue)) {
+		t = scnprintf(next, size, "its req queue is empty\n\n");
+		size -= t;
+		next += t;
+	} else {
+		list_for_each_entry(req, &ep->queue, queue) {
+			t = scnprintf(next, size,
+				"req %p actual 0x%x length 0x%x  buf %p\n",
+				&req->req, req->req.actual,
+				req->req.length, req->req.buf);
+			size -= t;
+			next += t;
+		}
+	}
+	/* other gadget->eplist ep */
+	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+		if (ep->desc) {
+			t = scnprintf(next, size,
+					"\n%s MaxPacketSize: 0x%x, "
+					"ep_num: %d\n",
+					ep->ep.name, ep->ep.maxpacket,
+					ep->ep_num);
+			size -= t;
+			next += t;
+
+			if (list_empty(&ep->queue)) {
+				t = scnprintf(next, size,
+						"its req queue is empty\n\n");
+				size -= t;
+				next += t;
+			} else {
+				list_for_each_entry(req, &ep->queue, queue) {
+					t = scnprintf(next, size,
+						"req %p actual 0x%x length "
+						"0x%x  buf %p\n",
+						&req->req, req->req.actual,
+						req->req.length, req->req.buf);
+					size -= t;
+					next += t;
+				}
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL);
+
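+/*
+ * Example: the dump produced by show_langwell_udc() can be read through
+ * the attribute created on the PCI device node (the address below is a
+ * placeholder; substitute the controller's actual PCI address):
+ *
+ *	cat /sys/bus/pci/devices/0000:00:0e.0/langwell_udc
+ */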
+
+/* device "remote_wakeup" sysfs attribute file */
+static ssize_t store_remote_wakeup(struct device *_dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct langwell_udc	*dev = dev_get_drvdata(_dev);
+	unsigned long		flags;
+	ssize_t			rc = count;
+
+	if (count > 2)
+		return -EINVAL;
+
+	if (count > 0 && buf[count-1] == '\n')
+		((char *) buf)[count-1] = 0;
+
+	if (buf[0] != '1')
+		return -EINVAL;
+
+	/* force remote wakeup on even if the gadget driver lacks support */
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->remote_wakeup = 1;
+	dev->dev_status |= (1 << USB_DEVICE_REMOTE_WAKEUP);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	langwell_wakeup(&dev->gadget);
+
+	return rc;
+}
+static DEVICE_ATTR(remote_wakeup, S_IWUSR, NULL, store_remote_wakeup);
+
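+/*
+ * Example (same placeholder PCI address as above): writing "1" forces
+ * remote wakeup on and signals resume to the host; any other value is
+ * rejected with -EINVAL by store_remote_wakeup() above.
+ *
+ *	echo 1 > /sys/bus/pci/devices/0000:00:0e.0/remote_wakeup
+ */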
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests.  then usb traffic follows until a
+ * disconnect is reported.  then a host may connect again, or
+ * the driver might get unbound.
+ */
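+
+/*
+ * Rough flow (a sketch; the exact entry points depend on how langwell_ops
+ * hooks these into the UDC core): binding a gadget driver reaches
+ * langwell_start(), which starts the controller; SETUP traffic is then
+ * delegated to driver->setup() from handle_setup_packet(); unbinding
+ * reaches langwell_stop(), which stops the controller.
+ */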
+
+static int langwell_start(struct usb_gadget *g,
+		struct usb_gadget_driver *driver)
+{
+	struct langwell_udc	*dev = gadget_to_langwell(g);
+	unsigned long		flags;
+	int			retval;
+
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* hook up the driver ... */
+	driver->driver.bus = NULL;
+	dev->driver = driver;
+	dev->gadget.dev.driver = &driver->driver;
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
+	if (retval)
+		goto err;
+
+	dev->usb_state = USB_STATE_ATTACHED;
+	dev->ep0_state = WAIT_FOR_SETUP;
+	dev->ep0_dir = USB_DIR_OUT;
+
+	/* enable interrupt and set controller to run state */
+	if (dev->got_irq)
+		langwell_udc_start(dev);
+
+	dev_vdbg(&dev->pdev->dev,
+			"After langwell_udc_start(), print all registers:\n");
+	print_all_registers(dev);
+
+	dev_info(&dev->pdev->dev, "register driver: %s\n",
+			driver->driver.name);
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+
+	return 0;
+
+err:
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+
+	return retval;
+}
+
+/* unregister gadget driver */
+static int langwell_stop(struct usb_gadget *g,
+		struct usb_gadget_driver *driver)
+{
+	struct langwell_udc	*dev = gadget_to_langwell(g);
+	unsigned long		flags;
+
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* exit PHY low power suspend */
+	if (dev->pdev->device != 0x0829)
+		langwell_phy_low_power(dev, 0);
+
+	/* unbind OTG transceiver */
+	if (dev->transceiver)
+		(void)otg_set_peripheral(dev->transceiver->otg, 0);
+
+	/* disable interrupt and set controller to stop state */
+	langwell_udc_stop(dev);
+
+	dev->usb_state = USB_STATE_ATTACHED;
+	dev->ep0_state = WAIT_FOR_SETUP;
+	dev->ep0_dir = USB_DIR_OUT;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* stop all usb activities */
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+	stop_activity(dev);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	device_remove_file(&dev->pdev->dev, &dev_attr_function);
+
+	dev_info(&dev->pdev->dev, "unregistered driver '%s'\n",
+			driver->driver.name);
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * setup tripwire is used as a semaphore to ensure that the setup data
+ * payload is extracted from a dQH without being corrupted
+ */
+static void setup_tripwire(struct langwell_udc *dev)
+{
+	u32			usbcmd,
+				endptsetupstat;
+	unsigned long		timeout;
+	struct langwell_dqh	*dqh;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* ep0 OUT dQH */
+	dqh = &dev->ep_dqh[EP_DIR_OUT];
+
+	/* Write-Clear endptsetupstat */
+	endptsetupstat = readl(&dev->op_regs->endptsetupstat);
+	writel(endptsetupstat, &dev->op_regs->endptsetupstat);
+
+	/* wait until endptsetupstat is cleared */
+	timeout = jiffies + SETUPSTAT_TIMEOUT;
+	while (readl(&dev->op_regs->endptsetupstat)) {
+		if (time_after(jiffies, timeout)) {
+			dev_err(&dev->pdev->dev, "setup_tripwire timeout\n");
+			break;
+		}
+		cpu_relax();
+	}
+
+	/* repeat while a newly arrived setup packet could corrupt the copy */
+	do {
+		/* set setup tripwire bit */
+		usbcmd = readl(&dev->op_regs->usbcmd);
+		writel(usbcmd | CMD_SUTW, &dev->op_regs->usbcmd);
+
+		/* copy the setup packet to local buffer */
+		memcpy(&dev->local_setup_buff, &dqh->dqh_setup, 8);
+	} while (!(readl(&dev->op_regs->usbcmd) & CMD_SUTW));
+
+	/* Write-Clear setup tripwire bit */
+	usbcmd = readl(&dev->op_regs->usbcmd);
+	writel(usbcmd & ~CMD_SUTW, &dev->op_regs->usbcmd);
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* protocol ep0 stall, will automatically be cleared on new transaction */
+static void ep0_stall(struct langwell_udc *dev)
+{
+	u32	endptctrl;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* set TX and RX to stall */
+	endptctrl = readl(&dev->op_regs->endptctrl[0]);
+	endptctrl |= EPCTRL_TXS | EPCTRL_RXS;
+	writel(endptctrl, &dev->op_regs->endptctrl[0]);
+
+	/* update ep0 state */
+	dev->ep0_state = WAIT_FOR_SETUP;
+	dev->ep0_dir = USB_DIR_OUT;
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* PRIME a status phase for ep0 */
+static int prime_status_phase(struct langwell_udc *dev, int dir)
+{
+	struct langwell_request	*req;
+	struct langwell_ep	*ep;
+	int			status = 0;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if (dir == EP_DIR_IN)
+		dev->ep0_dir = USB_DIR_IN;
+	else
+		dev->ep0_dir = USB_DIR_OUT;
+
+	ep = &dev->ep[0];
+	dev->ep0_state = WAIT_FOR_OUT_STATUS;
+
+	req = dev->status_req;
+
+	req->ep = ep;
+	req->req.length = 0;
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->req.complete = NULL;
+	req->dtd_count = 0;
+
+	if (!req_to_dtd(req))
+		status = queue_dtd(ep, req);
+	else
+		return -ENOMEM;
+
+	if (status)
+		dev_err(&dev->pdev->dev, "can't queue ep0 status request\n");
+
+	list_add_tail(&req->queue, &ep->queue);
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return status;
+}
+
+
+/* SET_ADDRESS request routine */
+static void set_address(struct langwell_udc *dev, u16 value,
+		u16 index, u16 length)
+{
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* save the new address to device struct */
+	dev->dev_addr = (u8) value;
+	dev_vdbg(&dev->pdev->dev, "dev->dev_addr = %d\n", dev->dev_addr);
+
+	/* update usb state */
+	dev->usb_state = USB_STATE_ADDRESS;
+
+	/* STATUS phase */
+	if (prime_status_phase(dev, EP_DIR_IN))
+		ep0_stall(dev);
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* return endpoint by windex */
+static struct langwell_ep *get_ep_by_windex(struct langwell_udc *dev,
+		u16 wIndex)
+{
+	struct langwell_ep		*ep;
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
+		return &dev->ep[0];
+
+	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+		u8	bEndpointAddress;
+		if (!ep->desc)
+			continue;
+
+		bEndpointAddress = ep->desc->bEndpointAddress;
+		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
+			continue;
+
+		if ((wIndex & USB_ENDPOINT_NUMBER_MASK)
+			== (bEndpointAddress & USB_ENDPOINT_NUMBER_MASK))
+			return ep;
+	}
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return NULL;
+}
+
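+/*
+ * Example: wIndex 0x0081 selects ep1-IN; bit 7 (USB_DIR_IN) carries the
+ * direction and bits 3:0 (USB_ENDPOINT_NUMBER_MASK) carry the endpoint
+ * number, so the lookup above returns the ep whose descriptor has
+ * bEndpointAddress 0x81.
+ */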
+
+/* return whether endpoint is stalled, 0: not stalled; 1: stalled */
+static int ep_is_stall(struct langwell_ep *ep)
+{
+	struct langwell_udc	*dev = ep->dev;
+	u32			endptctrl;
+	int			retval;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	endptctrl = readl(&dev->op_regs->endptctrl[ep->ep_num]);
+	if (is_in(ep))
+		retval = endptctrl & EPCTRL_TXS ? 1 : 0;
+	else
+		retval = endptctrl & EPCTRL_RXS ? 1 : 0;
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return retval;
+}
+
+
+/* GET_STATUS request routine */
+static void get_status(struct langwell_udc *dev, u8 request_type, u16 value,
+		u16 index, u16 length)
+{
+	struct langwell_request	*req;
+	struct langwell_ep	*ep;
+	u16	status_data = 0;	/* 16-bit status data, CPU byte order */
+	int	status = 0;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	ep = &dev->ep[0];
+
+	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+		/* get device status */
+		status_data = dev->dev_status;
+	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
+		/* get interface status */
+		status_data = 0;
+	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
+		/* get endpoint status */
+		struct langwell_ep	*epn;
+		epn = get_ep_by_windex(dev, index);
+		/* stall if endpoint doesn't exist */
+		if (!epn)
+			goto stall;
+
+		status_data = ep_is_stall(epn) << USB_ENDPOINT_HALT;
+	}
+
+	dev_dbg(&dev->pdev->dev, "get status data: 0x%04x\n", status_data);
+
+	dev->ep0_dir = USB_DIR_IN;
+
+	/* borrow the per device status_req */
+	req = dev->status_req;
+
+	/* fill in the request structure */
+	*((u16 *) req->req.buf) = cpu_to_le16(status_data);
+	req->ep = ep;
+	req->req.length = 2;
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->req.complete = NULL;
+	req->dtd_count = 0;
+
+	/* prime the data phase */
+	if (!req_to_dtd(req))
+		status = queue_dtd(ep, req);
+	else			/* no mem */
+		goto stall;
+
+	if (status) {
+		dev_err(&dev->pdev->dev,
+				"response error on GET_STATUS request\n");
+		goto stall;
+	}
+
+	list_add_tail(&req->queue, &ep->queue);
+	dev->ep0_state = DATA_STATE_XMIT;
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return;
+stall:
+	ep0_stall(dev);
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* setup packet interrupt handler */
+static void handle_setup_packet(struct langwell_udc *dev,
+		struct usb_ctrlrequest *setup)
+{
+	u16	wValue = le16_to_cpu(setup->wValue);
+	u16	wIndex = le16_to_cpu(setup->wIndex);
+	u16	wLength = le16_to_cpu(setup->wLength);
+	u32	portsc1;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* ep0 fifo flush */
+	nuke(&dev->ep[0], -ESHUTDOWN);
+
+	dev_dbg(&dev->pdev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+			setup->bRequestType, setup->bRequest,
+			wValue, wIndex, wLength);
+
+	/* RNDIS gadget delegate */
+	if ((setup->bRequestType == 0x21) && (setup->bRequest == 0x00)) {
+		/* USB_CDC_SEND_ENCAPSULATED_COMMAND */
+		goto delegate;
+	}
+
+	/* USB_CDC_GET_ENCAPSULATED_RESPONSE */
+	if ((setup->bRequestType == 0xa1) && (setup->bRequest == 0x01)) {
+		/* USB_CDC_GET_ENCAPSULATED_RESPONSE */
+		goto delegate;
+	}
+
+	/* We process some standard setup requests here */
+	switch (setup->bRequest) {
+	case USB_REQ_GET_STATUS:
+		dev_dbg(&dev->pdev->dev, "SETUP: USB_REQ_GET_STATUS\n");
+		/* get status, DATA and STATUS phase */
+		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+					!= (USB_DIR_IN | USB_TYPE_STANDARD))
+			break;
+		get_status(dev, setup->bRequestType, wValue, wIndex, wLength);
+		goto end;
+
+	case USB_REQ_SET_ADDRESS:
+		dev_dbg(&dev->pdev->dev, "SETUP: USB_REQ_SET_ADDRESS\n");
+		/* STATUS phase */
+		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
+						| USB_RECIP_DEVICE))
+			break;
+		set_address(dev, wValue, wIndex, wLength);
+		goto end;
+
+	case USB_REQ_CLEAR_FEATURE:
+	case USB_REQ_SET_FEATURE:
+		/* STATUS phase */
+	{
+		int rc = -EOPNOTSUPP;
+		if (setup->bRequest == USB_REQ_SET_FEATURE)
+			dev_dbg(&dev->pdev->dev,
+					"SETUP: USB_REQ_SET_FEATURE\n");
+		else if (setup->bRequest == USB_REQ_CLEAR_FEATURE)
+			dev_dbg(&dev->pdev->dev,
+					"SETUP: USB_REQ_CLEAR_FEATURE\n");
+
+		if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
+				== (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
+			struct langwell_ep	*epn;
+			epn = get_ep_by_windex(dev, wIndex);
+			/* stall if endpoint doesn't exist */
+			if (!epn) {
+				ep0_stall(dev);
+				goto end;
+			}
+
+			if (wValue != 0 || wLength != 0
+					|| epn->ep_num > dev->ep_max)
+				break;
+
+			spin_unlock(&dev->lock);
+			rc = langwell_ep_set_halt(&epn->ep,
+				(setup->bRequest == USB_REQ_SET_FEATURE)
+				? 1 : 0);
+			spin_lock(&dev->lock);
+
+		} else if ((setup->bRequestType & (USB_RECIP_MASK
+				| USB_TYPE_MASK)) == (USB_RECIP_DEVICE
+				| USB_TYPE_STANDARD)) {
+			rc = 0;
+			switch (wValue) {
+			case USB_DEVICE_REMOTE_WAKEUP:
+				if (setup->bRequest == USB_REQ_SET_FEATURE) {
+					dev->remote_wakeup = 1;
+					dev->dev_status |= (1 << wValue);
+				} else {
+					dev->remote_wakeup = 0;
+					dev->dev_status &= ~(1 << wValue);
+				}
+				break;
+			case USB_DEVICE_TEST_MODE:
+				dev_dbg(&dev->pdev->dev, "SETUP: TEST MODE\n");
+				if ((wIndex & 0xff) ||
+					(dev->gadget.speed != USB_SPEED_HIGH))
+					ep0_stall(dev);
+
+				switch (wIndex >> 8) {
+				case TEST_J:
+				case TEST_K:
+				case TEST_SE0_NAK:
+				case TEST_PACKET:
+				case TEST_FORCE_EN:
+					if (prime_status_phase(dev, EP_DIR_IN))
+						ep0_stall(dev);
+					portsc1 = readl(&dev->op_regs->portsc1);
+					portsc1 |= (wIndex & 0xf00) << 8;
+					writel(portsc1, &dev->op_regs->portsc1);
+					goto end;
+				default:
+					rc = -EOPNOTSUPP;
+				}
+				break;
+			default:
+				rc = -EOPNOTSUPP;
+				break;
+			}
+
+			if (!gadget_is_otg(&dev->gadget))
+				break;
+			else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE)
+				dev->gadget.b_hnp_enable = 1;
+			else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
+				dev->gadget.a_hnp_support = 1;
+			else if (setup->bRequest ==
+					USB_DEVICE_A_ALT_HNP_SUPPORT)
+				dev->gadget.a_alt_hnp_support = 1;
+			else
+				break;
+		} else
+			break;
+
+		if (rc == 0) {
+			if (prime_status_phase(dev, EP_DIR_IN))
+				ep0_stall(dev);
+		}
+		goto end;
+	}
+
+	case USB_REQ_GET_DESCRIPTOR:
+		dev_dbg(&dev->pdev->dev,
+				"SETUP: USB_REQ_GET_DESCRIPTOR\n");
+		goto delegate;
+
+	case USB_REQ_SET_DESCRIPTOR:
+		dev_dbg(&dev->pdev->dev,
+				"SETUP: USB_REQ_SET_DESCRIPTOR unsupported\n");
+		goto delegate;
+
+	case USB_REQ_GET_CONFIGURATION:
+		dev_dbg(&dev->pdev->dev,
+				"SETUP: USB_REQ_GET_CONFIGURATION\n");
+		goto delegate;
+
+	case USB_REQ_SET_CONFIGURATION:
+		dev_dbg(&dev->pdev->dev,
+				"SETUP: USB_REQ_SET_CONFIGURATION\n");
+		goto delegate;
+
+	case USB_REQ_GET_INTERFACE:
+		dev_dbg(&dev->pdev->dev,
+				"SETUP: USB_REQ_GET_INTERFACE\n");
+		goto delegate;
+
+	case USB_REQ_SET_INTERFACE:
+		dev_dbg(&dev->pdev->dev,
+				"SETUP: USB_REQ_SET_INTERFACE\n");
+		goto delegate;
+
+	case USB_REQ_SYNCH_FRAME:
+		dev_dbg(&dev->pdev->dev,
+				"SETUP: USB_REQ_SYNCH_FRAME unsupported\n");
+		goto delegate;
+
+	default:
+		/* delegate USB standard requests to the gadget driver */
+		goto delegate;
+delegate:
+		/* USB requests handled by gadget */
+		if (wLength) {
+			/* DATA phase from gadget, STATUS phase from udc */
+			dev->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+					?  USB_DIR_IN : USB_DIR_OUT;
+			dev_vdbg(&dev->pdev->dev,
+					"dev->ep0_dir = 0x%x, wLength = %d\n",
+					dev->ep0_dir, wLength);
+			spin_unlock(&dev->lock);
+			if (dev->driver->setup(&dev->gadget,
+					&dev->local_setup_buff) < 0)
+				ep0_stall(dev);
+			spin_lock(&dev->lock);
+			dev->ep0_state = (setup->bRequestType & USB_DIR_IN)
+					?  DATA_STATE_XMIT : DATA_STATE_RECV;
+		} else {
+			/* no DATA phase, IN STATUS phase from gadget */
+			dev->ep0_dir = USB_DIR_IN;
+			dev_vdbg(&dev->pdev->dev,
+					"dev->ep0_dir = 0x%x, wLength = %d\n",
+					dev->ep0_dir, wLength);
+			spin_unlock(&dev->lock);
+			if (dev->driver->setup(&dev->gadget,
+					&dev->local_setup_buff) < 0)
+				ep0_stall(dev);
+			spin_lock(&dev->lock);
+			dev->ep0_state = WAIT_FOR_OUT_STATUS;
+		}
+		break;
+	}
+end:
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* transfer completion, process endpoint request and free the completed dTDs
+ * for this request
+ */
+static int process_ep_req(struct langwell_udc *dev, int index,
+		struct langwell_request *curr_req)
+{
+	struct langwell_dtd	*curr_dtd;
+	struct langwell_dqh	*curr_dqh;
+	int			td_complete, actual, remaining_length;
+	int			i, dir;
+	u8			dtd_status = 0;
+	int			retval = 0;
+
+	curr_dqh = &dev->ep_dqh[index];
+	dir = index % 2;
+
+	curr_dtd = curr_req->head;
+	td_complete = 0;
+	actual = curr_req->req.length;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	for (i = 0; i < curr_req->dtd_count; i++) {
+
+		/* command execution states by dTD */
+		dtd_status = curr_dtd->dtd_status;
+
+		barrier();
+		remaining_length = le16_to_cpu(curr_dtd->dtd_total);
+		actual -= remaining_length;
+
+		if (!dtd_status) {
+			/* transfers completed successfully */
+			if (!remaining_length) {
+				td_complete++;
+				dev_vdbg(&dev->pdev->dev,
+					"dTD transmitted successfully\n");
+			} else {
+				if (dir) {
+					dev_vdbg(&dev->pdev->dev,
+						"TX dTD remains data\n");
+					retval = -EPROTO;
+					break;
+
+				} else {
+					td_complete++;
+					break;
+				}
+			}
+		} else {
+			/* transfers completed with errors */
+			if (dtd_status & DTD_STS_ACTIVE) {
+				dev_dbg(&dev->pdev->dev,
+					"dTD status ACTIVE dQH[%d]\n", index);
+				retval = 1;
+				return retval;
+			} else if (dtd_status & DTD_STS_HALTED) {
+				dev_err(&dev->pdev->dev,
+					"dTD error %08x dQH[%d]\n",
+					dtd_status, index);
+				/* clear the errors and halt condition */
+				curr_dqh->dtd_status = 0;
+				retval = -EPIPE;
+				break;
+			} else if (dtd_status & DTD_STS_DBE) {
+				dev_dbg(&dev->pdev->dev,
+					"data buffer (overflow) error\n");
+				retval = -EPROTO;
+				break;
+			} else if (dtd_status & DTD_STS_TRE) {
+				dev_dbg(&dev->pdev->dev,
+					"transaction(ISO) error\n");
+				retval = -EILSEQ;
+				break;
+			} else
+				dev_err(&dev->pdev->dev,
+					"unknown error (0x%x)!\n",
+					dtd_status);
+		}
+
+		if (i != curr_req->dtd_count - 1)
+			curr_dtd = (struct langwell_dtd *)
+				curr_dtd->next_dtd_virt;
+	}
+
+	if (retval)
+		return retval;
+
+	curr_req->req.actual = actual;
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* complete DATA or STATUS phase of ep0; prime the status phase if needed */
+static void ep0_req_complete(struct langwell_udc *dev,
+		struct langwell_ep *ep0, struct langwell_request *req)
+{
+	u32	new_addr;
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if (dev->usb_state == USB_STATE_ADDRESS) {
+		/* set the new address */
+		new_addr = (u32)dev->dev_addr;
+		writel(new_addr << USBADR_SHIFT, &dev->op_regs->deviceaddr);
+
+		new_addr = USBADR(readl(&dev->op_regs->deviceaddr));
+		dev_vdbg(&dev->pdev->dev, "new_addr = %d\n", new_addr);
+	}
+
+	done(ep0, req, 0);
+
+	switch (dev->ep0_state) {
+	case DATA_STATE_XMIT:
+		/* receive status phase */
+		if (prime_status_phase(dev, EP_DIR_OUT))
+			ep0_stall(dev);
+		break;
+	case DATA_STATE_RECV:
+		/* send status phase */
+		if (prime_status_phase(dev, EP_DIR_IN))
+			ep0_stall(dev);
+		break;
+	case WAIT_FOR_OUT_STATUS:
+		dev->ep0_state = WAIT_FOR_SETUP;
+		break;
+	case WAIT_FOR_SETUP:
+		dev_err(&dev->pdev->dev, "unexpected ep0 packets\n");
+		break;
+	default:
+		ep0_stall(dev);
+		break;
+	}
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* USB transfer completion interrupt */
+static void handle_trans_complete(struct langwell_udc *dev)
+{
+	u32			complete_bits;
+	int			i, ep_num, dir, bit_mask, status;
+	struct langwell_ep	*epn;
+	struct langwell_request	*curr_req, *temp_req;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	complete_bits = readl(&dev->op_regs->endptcomplete);
+	dev_vdbg(&dev->pdev->dev, "endptcomplete register: 0x%08x\n",
+			complete_bits);
+
+	/* Write-Clear the bits in endptcomplete register */
+	writel(complete_bits, &dev->op_regs->endptcomplete);
+
+	if (!complete_bits) {
+		dev_dbg(&dev->pdev->dev, "complete_bits = 0\n");
+		goto done;
+	}
+
+	for (i = 0; i < dev->ep_max; i++) {
+		ep_num = i / 2;
+		dir = i % 2;
+
+		bit_mask = 1 << (ep_num + 16 * dir);
+
+		if (!(complete_bits & bit_mask))
+			continue;
+
+		/* ep0 */
+		if (i == 1)
+			epn = &dev->ep[0];
+		else
+			epn = &dev->ep[i];
+
+		if (epn->name == NULL) {
+			dev_warn(&dev->pdev->dev, "invalid endpoint\n");
+			continue;
+		}
+
+		if (i < 2)
+			/* ep0 in and out */
+			dev_dbg(&dev->pdev->dev, "%s-%s transfer completed\n",
+					epn->name,
+					is_in(epn) ? "in" : "out");
+		else
+			dev_dbg(&dev->pdev->dev, "%s transfer completed\n",
+					epn->name);
+
+		/* process the req queue until an incomplete request */
+		list_for_each_entry_safe(curr_req, temp_req,
+				&epn->queue, queue) {
+			status = process_ep_req(dev, i, curr_req);
+			dev_vdbg(&dev->pdev->dev, "%s req status: %d\n",
+					epn->name, status);
+
+			if (status)
+				break;
+
+			/* write back status to req */
+			curr_req->req.status = status;
+
+			/* ep0 request completion */
+			if (ep_num == 0) {
+				ep0_req_complete(dev, epn, curr_req);
+				break;
+			} else {
+				done(epn, curr_req, status);
+			}
+		}
+	}
+done:
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+/* port change detect interrupt handler */
+static void handle_port_change(struct langwell_udc *dev)
+{
+	u32	portsc1, devlc;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if (dev->bus_reset)
+		dev->bus_reset = 0;
+
+	portsc1 = readl(&dev->op_regs->portsc1);
+	devlc = readl(&dev->op_regs->devlc);
+	dev_vdbg(&dev->pdev->dev, "portsc1 = 0x%08x, devlc = 0x%08x\n",
+			portsc1, devlc);
+
+	/* bus reset is finished */
+	if (!(portsc1 & PORTS_PR)) {
+		/* get the speed */
+		dev->gadget.speed = lpm_device_speed(devlc);
+		dev_vdbg(&dev->pdev->dev, "dev->gadget.speed = %d\n",
+			dev->gadget.speed);
+	}
+
+	/* LPM L0 to L1 */
+	if (dev->lpm && dev->lpm_state == LPM_L0)
+		if (portsc1 & PORTS_SUSP && portsc1 & PORTS_SLP) {
+			dev_info(&dev->pdev->dev, "LPM L0 to L1\n");
+			dev->lpm_state = LPM_L1;
+		}
+
+	/* LPM L1 to L0, force resume or remote wakeup finished */
+	if (dev->lpm && dev->lpm_state == LPM_L1)
+		if (!(portsc1 & PORTS_SUSP)) {
+			dev_info(&dev->pdev->dev, "LPM L1 to L0\n");
+			dev->lpm_state = LPM_L0;
+		}
+
+	/* update USB state */
+	if (!dev->resume_state)
+		dev->usb_state = USB_STATE_DEFAULT;
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* USB reset interrupt handler */
+static void handle_usb_reset(struct langwell_udc *dev)
+{
+	u32		deviceaddr,
+			endptsetupstat,
+			endptcomplete;
+	unsigned long	timeout;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* Write-Clear the device address */
+	deviceaddr = readl(&dev->op_regs->deviceaddr);
+	writel(deviceaddr & ~USBADR_MASK, &dev->op_regs->deviceaddr);
+
+	dev->dev_addr = 0;
+
+	/* clear usb state */
+	dev->resume_state = 0;
+
+	/* LPM L1 to L0, reset */
+	if (dev->lpm)
+		dev->lpm_state = LPM_L0;
+
+	dev->ep0_dir = USB_DIR_OUT;
+	dev->ep0_state = WAIT_FOR_SETUP;
+
+	/* remote wakeup reset to 0 when the device is reset */
+	dev->remote_wakeup = 0;
+	dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
+	dev->gadget.b_hnp_enable = 0;
+	dev->gadget.a_hnp_support = 0;
+	dev->gadget.a_alt_hnp_support = 0;
+
+	/* Write-Clear all the setup token semaphores */
+	endptsetupstat = readl(&dev->op_regs->endptsetupstat);
+	writel(endptsetupstat, &dev->op_regs->endptsetupstat);
+
+	/* Write-Clear all the endpoint complete status bits */
+	endptcomplete = readl(&dev->op_regs->endptcomplete);
+	writel(endptcomplete, &dev->op_regs->endptcomplete);
+
+	/* wait until all endptprime bits cleared */
+	timeout = jiffies + PRIME_TIMEOUT;
+	while (readl(&dev->op_regs->endptprime)) {
+		if (time_after(jiffies, timeout)) {
+			dev_err(&dev->pdev->dev, "USB reset timeout\n");
+			break;
+		}
+		cpu_relax();
+	}
+
+	/* write 1s to endptflush register to clear any primed buffers */
+	writel((u32) ~0, &dev->op_regs->endptflush);
+
+	if (readl(&dev->op_regs->portsc1) & PORTS_PR) {
+		dev_vdbg(&dev->pdev->dev, "USB bus reset\n");
+		/* bus is resetting */
+		dev->bus_reset = 1;
+
+		/* reset all the queues, stop all USB activities */
+		stop_activity(dev);
+		dev->usb_state = USB_STATE_DEFAULT;
+	} else {
+		dev_vdbg(&dev->pdev->dev, "device controller reset\n");
+		/* controller reset */
+		langwell_udc_reset(dev);
+
+		/* reset all the queues, stop all USB activities */
+		stop_activity(dev);
+
+		/* reset ep0 dQH and endptctrl */
+		ep0_reset(dev);
+
+		/* enable interrupt and set controller to run state */
+		langwell_udc_start(dev);
+
+		dev->usb_state = USB_STATE_ATTACHED;
+	}
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* USB bus suspend/resume interrupt */
+static void handle_bus_suspend(struct langwell_udc *dev)
+{
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	dev->resume_state = dev->usb_state;
+	dev->usb_state = USB_STATE_SUSPENDED;
+
+	/* report suspend to the driver */
+	if (dev->driver) {
+		if (dev->driver->suspend) {
+			spin_unlock(&dev->lock);
+			dev->driver->suspend(&dev->gadget);
+			spin_lock(&dev->lock);
+			dev_dbg(&dev->pdev->dev, "suspend %s\n",
+					dev->driver->driver.name);
+		}
+	}
+
+	/* enter PHY low power suspend */
+	if (dev->pdev->device != 0x0829)
+		langwell_phy_low_power(dev, 1);
+
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+static void handle_bus_resume(struct langwell_udc *dev)
+{
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	dev->usb_state = dev->resume_state;
+	dev->resume_state = 0;
+
+	/* exit PHY low power suspend */
+	if (dev->pdev->device != 0x0829)
+		langwell_phy_low_power(dev, 0);
+
+	/* report resume to the driver */
+	if (dev->driver) {
+		if (dev->driver->resume) {
+			spin_unlock(&dev->lock);
+			dev->driver->resume(&dev->gadget);
+			spin_lock(&dev->lock);
+			dev_dbg(&dev->pdev->dev, "resume %s\n",
+					dev->driver->driver.name);
+		}
+	}
+
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* USB device controller interrupt handler */
+static irqreturn_t langwell_irq(int irq, void *_dev)
+{
+	struct langwell_udc	*dev = _dev;
+	u32			usbsts,
+				usbintr,
+				irq_sts,
+				portsc1;
+
+	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	if (dev->stopped) {
+		dev_vdbg(&dev->pdev->dev, "handle IRQ_NONE\n");
+		dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+		return IRQ_NONE;
+	}
+
+	spin_lock(&dev->lock);
+
+	/* USB status */
+	usbsts = readl(&dev->op_regs->usbsts);
+
+	/* USB interrupt enable */
+	usbintr = readl(&dev->op_regs->usbintr);
+
+	irq_sts = usbsts & usbintr;
+	dev_vdbg(&dev->pdev->dev,
+			"usbsts = 0x%08x, usbintr = 0x%08x, irq_sts = 0x%08x\n",
+			usbsts, usbintr, irq_sts);
+
+	if (!irq_sts) {
+		dev_vdbg(&dev->pdev->dev, "handle IRQ_NONE\n");
+		dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+		spin_unlock(&dev->lock);
+		return IRQ_NONE;
+	}
+
+	/* Write-Clear interrupt status bits */
+	writel(irq_sts, &dev->op_regs->usbsts);
+
+	/* resume from suspend */
+	portsc1 = readl(&dev->op_regs->portsc1);
+	if (dev->usb_state == USB_STATE_SUSPENDED)
+		if (!(portsc1 & PORTS_SUSP))
+			handle_bus_resume(dev);
+
+	/* USB interrupt */
+	if (irq_sts & STS_UI) {
+		dev_vdbg(&dev->pdev->dev, "USB interrupt\n");
+
+		/* setup packet received from ep0 */
+		if (readl(&dev->op_regs->endptsetupstat)
+				& EP0SETUPSTAT_MASK) {
+			dev_vdbg(&dev->pdev->dev,
+				"USB SETUP packet received interrupt\n");
+			/* setup tripwire semaphore */
+			setup_tripwire(dev);
+			handle_setup_packet(dev, &dev->local_setup_buff);
+		}
+
+		/* USB transfer completion */
+		if (readl(&dev->op_regs->endptcomplete)) {
+			dev_vdbg(&dev->pdev->dev,
+				"USB transfer completion interrupt\n");
+			handle_trans_complete(dev);
+		}
+	}
+
+	/* SOF received interrupt (for ISO transfer) */
+	if (irq_sts & STS_SRI) {
+		/* FIXME */
+		/* dev_vdbg(&dev->pdev->dev, "SOF received interrupt\n"); */
+	}
+
+	/* port change detect interrupt */
+	if (irq_sts & STS_PCI) {
+		dev_vdbg(&dev->pdev->dev, "port change detect interrupt\n");
+		handle_port_change(dev);
+	}
+
+	/* suspend interrupt */
+	if (irq_sts & STS_SLI) {
+		dev_vdbg(&dev->pdev->dev, "suspend interrupt\n");
+		handle_bus_suspend(dev);
+	}
+
+	/* USB reset interrupt */
+	if (irq_sts & STS_URI) {
+		dev_vdbg(&dev->pdev->dev, "USB reset interrupt\n");
+		handle_usb_reset(dev);
+	}
+
+	/* USB error or system error interrupt */
+	if (irq_sts & (STS_UEI | STS_SEI)) {
+		/* FIXME */
+		dev_warn(&dev->pdev->dev, "error IRQ, irq_sts: %x\n", irq_sts);
+	}
+
+	spin_unlock(&dev->lock);
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return IRQ_HANDLED;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* release device structure */
+static void gadget_release(struct device *_dev)
+{
+	struct langwell_udc	*dev = dev_get_drvdata(_dev);
+
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	complete(dev->done);
+
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	kfree(dev);
+}
+
+
+/* enable SRAM caching if SRAM detected */
+static void sram_init(struct langwell_udc *dev)
+{
+	struct pci_dev		*pdev = dev->pdev;
+
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	dev->sram_addr = pci_resource_start(pdev, 1);
+	dev->sram_size = pci_resource_len(pdev, 1);
+	dev_info(&dev->pdev->dev, "Found private SRAM at %x size:%x\n",
+			dev->sram_addr, dev->sram_size);
+	dev->got_sram = 1;
+
+	if (pci_request_region(pdev, 1, kobject_name(&pdev->dev.kobj))) {
+		dev_warn(&dev->pdev->dev, "SRAM request failed\n");
+		dev->got_sram = 0;
+	} else if (!dma_declare_coherent_memory(&pdev->dev, dev->sram_addr,
+			dev->sram_addr, dev->sram_size, DMA_MEMORY_MAP)) {
+		dev_warn(&dev->pdev->dev, "SRAM DMA declare failed\n");
+		pci_release_region(pdev, 1);
+		dev->got_sram = 0;
+	}
+
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* release SRAM caching */
+static void sram_deinit(struct langwell_udc *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	dma_release_declared_memory(&pdev->dev);
+	pci_release_region(pdev, 1);
+
+	dev->got_sram = 0;
+
+	dev_info(&dev->pdev->dev, "release SRAM caching\n");
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+
+/* tear down the binding between this driver and the pci device */
+static void langwell_udc_remove(struct pci_dev *pdev)
+{
+	struct langwell_udc	*dev = pci_get_drvdata(pdev);
+
+	DECLARE_COMPLETION(done);
+
+	BUG_ON(dev->driver);
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	dev->done = &done;
+
+	/* free dTD dma_pool and dQH */
+	if (dev->dtd_pool)
+		dma_pool_destroy(dev->dtd_pool);
+
+	if (dev->ep_dqh)
+		dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
+			dev->ep_dqh, dev->ep_dqh_dma);
+
+	/* release SRAM caching */
+	if (dev->has_sram && dev->got_sram)
+		sram_deinit(dev);
+
+	if (dev->status_req) {
+		kfree(dev->status_req->req.buf);
+		kfree(dev->status_req);
+	}
+
+	kfree(dev->ep);
+
+	/* disable IRQ handler */
+	if (dev->got_irq)
+		free_irq(pdev->irq, dev);
+
+	if (dev->cap_regs)
+		iounmap(dev->cap_regs);
+
+	if (dev->region)
+		release_mem_region(pci_resource_start(pdev, 0),
+				pci_resource_len(pdev, 0));
+
+	if (dev->enabled)
+		pci_disable_device(pdev);
+
+	dev->cap_regs = NULL;
+
+	dev_info(&dev->pdev->dev, "unbind\n");
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+
+	device_unregister(&dev->gadget.dev);
+	device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
+	device_remove_file(&pdev->dev, &dev_attr_remote_wakeup);
+
+	pci_set_drvdata(pdev, NULL);
+
+	/* free dev, wait for the release() finished */
+	wait_for_completion(&done);
+}
+
+
+/*
+ * wrap this driver around the specified device, but
+ * don't respond over USB until a gadget driver binds to us.
+ */
+static int langwell_udc_probe(struct pci_dev *pdev,
+		const struct pci_device_id *id)
+{
+	struct langwell_udc	*dev;
+	unsigned long		resource, len;
+	void			__iomem *base = NULL;
+	size_t			size;
+	int			retval;
+
+	/* alloc, and start init */
+	dev = kzalloc(sizeof *dev, GFP_KERNEL);
+	if (dev == NULL) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	/* initialize device spinlock */
+	spin_lock_init(&dev->lock);
+
+	dev->pdev = pdev;
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	pci_set_drvdata(pdev, dev);
+
+	/* now all the pci goodies ... */
+	if (pci_enable_device(pdev) < 0) {
+		retval = -ENODEV;
+		goto error;
+	}
+	dev->enabled = 1;
+
+	/* control register: BAR 0 */
+	resource = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!request_mem_region(resource, len, driver_name)) {
+		dev_err(&dev->pdev->dev, "controller already in use\n");
+		retval = -EBUSY;
+		goto error;
+	}
+	dev->region = 1;
+
+	base = ioremap_nocache(resource, len);
+	if (base == NULL) {
+		dev_err(&dev->pdev->dev, "can't map memory\n");
+		retval = -EFAULT;
+		goto error;
+	}
+
+	dev->cap_regs = (struct langwell_cap_regs __iomem *) base;
+	dev_vdbg(&dev->pdev->dev, "dev->cap_regs: %p\n", dev->cap_regs);
+	dev->op_regs = (struct langwell_op_regs __iomem *)
+		(base + OP_REG_OFFSET);
+	dev_vdbg(&dev->pdev->dev, "dev->op_regs: %p\n", dev->op_regs);
+
+	/* irq setup after old hardware is cleaned up */
+	if (!pdev->irq) {
+		dev_err(&dev->pdev->dev, "No IRQ. Check PCI setup!\n");
+		retval = -ENODEV;
+		goto error;
+	}
+
+	dev->has_sram = 1;
+	dev->got_sram = 0;
+	dev_vdbg(&dev->pdev->dev, "dev->has_sram: %d\n", dev->has_sram);
+
+	/* enable SRAM caching if detected */
+	if (dev->has_sram && !dev->got_sram)
+		sram_init(dev);
+
+	dev_info(&dev->pdev->dev,
+			"irq %d, io mem: 0x%08lx, len: 0x%08lx, pci mem 0x%p\n",
+			pdev->irq, resource, len, base);
+	/* enables bus-mastering for device dev */
+	pci_set_master(pdev);
+
+	if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
+				driver_name, dev) != 0) {
+		dev_err(&dev->pdev->dev,
+				"request interrupt %d failed\n", pdev->irq);
+		retval = -EBUSY;
+		goto error;
+	}
+	dev->got_irq = 1;
+
+	/* set stopped bit */
+	dev->stopped = 1;
+
+	/* capabilities and endpoint number */
+	dev->lpm = (readl(&dev->cap_regs->hccparams) & HCC_LEN) ? 1 : 0;
+	dev->dciversion = readw(&dev->cap_regs->dciversion);
+	dev->devcap = (readl(&dev->cap_regs->dccparams) & DEVCAP) ? 1 : 0;
+	dev_vdbg(&dev->pdev->dev, "dev->lpm: %d\n", dev->lpm);
+	dev_vdbg(&dev->pdev->dev, "dev->dciversion: 0x%04x\n",
+			dev->dciversion);
+	dev_vdbg(&dev->pdev->dev, "dccparams: 0x%08x\n",
+			readl(&dev->cap_regs->dccparams));
+	dev_vdbg(&dev->pdev->dev, "dev->devcap: %d\n", dev->devcap);
+	if (!dev->devcap) {
+		dev_err(&dev->pdev->dev, "can't support device mode\n");
+		retval = -ENODEV;
+		goto error;
+	}
+
+	/* a pair of endpoints (out/in) for each address */
+	dev->ep_max = DEN(readl(&dev->cap_regs->dccparams)) * 2;
+	dev_vdbg(&dev->pdev->dev, "dev->ep_max: %d\n", dev->ep_max);
+
+	/* allocate endpoints memory */
+	dev->ep = kzalloc(sizeof(struct langwell_ep) * dev->ep_max,
+			GFP_KERNEL);
+	if (!dev->ep) {
+		dev_err(&dev->pdev->dev, "allocate endpoints memory failed\n");
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	/* allocate device dQH memory */
+	size = dev->ep_max * sizeof(struct langwell_dqh);
+	dev_vdbg(&dev->pdev->dev, "orig size = %zd\n", size);
+	if (size < DQH_ALIGNMENT)
+		size = DQH_ALIGNMENT;
+	else if ((size % DQH_ALIGNMENT) != 0) {
+		size += DQH_ALIGNMENT + 1;
+		size &= ~(DQH_ALIGNMENT - 1);
+	}
+	dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
+					&dev->ep_dqh_dma, GFP_KERNEL);
+	if (!dev->ep_dqh) {
+		dev_err(&dev->pdev->dev, "allocate dQH memory failed\n");
+		retval = -ENOMEM;
+		goto error;
+	}
+	dev->ep_dqh_size = size;
+	dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %zd\n", dev->ep_dqh_size);
+
+	/* initialize ep0 status request structure */
+	dev->status_req = kzalloc(sizeof(struct langwell_request), GFP_KERNEL);
+	if (!dev->status_req) {
+		dev_err(&dev->pdev->dev,
+				"allocate status_req memory failed\n");
+		retval = -ENOMEM;
+		goto error;
+	}
+	INIT_LIST_HEAD(&dev->status_req->queue);
+
+	/* allocate a small amount of memory to get a valid address */
+	dev->status_req->req.buf = kmalloc(8, GFP_KERNEL);
+	dev->status_req->req.dma = virt_to_phys(dev->status_req->req.buf);
+
+	dev->resume_state = USB_STATE_NOTATTACHED;
+	dev->usb_state = USB_STATE_POWERED;
+	dev->ep0_dir = USB_DIR_OUT;
+
+	/* remote wakeup reset to 0 when the device is reset */
+	dev->remote_wakeup = 0;
+	dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
+
+	/* reset device controller */
+	langwell_udc_reset(dev);
+
+	/* initialize gadget structure */
+	dev->gadget.ops = &langwell_ops;	/* usb_gadget_ops */
+	dev->gadget.ep0 = &dev->ep[0].ep;	/* gadget ep0 */
+	INIT_LIST_HEAD(&dev->gadget.ep_list);	/* ep_list */
+	dev->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
+	dev->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */
+
+	/* the "gadget" abstracts/virtualizes the controller */
+	dev_set_name(&dev->gadget.dev, "gadget");
+	dev->gadget.dev.parent = &pdev->dev;
+	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+	dev->gadget.dev.release = gadget_release;
+	dev->gadget.name = driver_name;		/* gadget name */
+
+	/* controller endpoints reinit */
+	eps_reinit(dev);
+
+	/* reset ep0 dQH and endptctrl */
+	ep0_reset(dev);
+
+	/* create dTD dma_pool resource */
+	dev->dtd_pool = dma_pool_create("langwell_dtd",
+			&dev->pdev->dev,
+			sizeof(struct langwell_dtd),
+			DTD_ALIGNMENT,
+			DMA_BOUNDARY);
+
+	if (!dev->dtd_pool) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	/* done */
+	dev_info(&dev->pdev->dev, "%s\n", driver_desc);
+	dev_info(&dev->pdev->dev, "irq %d, pci mem %p\n", pdev->irq, base);
+	dev_info(&dev->pdev->dev, "Driver version: " DRIVER_VERSION "\n");
+	dev_info(&dev->pdev->dev, "Support (max) %d endpoints\n", dev->ep_max);
+	dev_info(&dev->pdev->dev, "Device interface version: 0x%04x\n",
+			dev->dciversion);
+	dev_info(&dev->pdev->dev, "Controller mode: %s\n",
+			dev->devcap ? "Device" : "Host");
+	dev_info(&dev->pdev->dev, "Support USB LPM: %s\n",
+			dev->lpm ? "Yes" : "No");
+
+	dev_vdbg(&dev->pdev->dev,
+			"After langwell_udc_probe(), print all registers:\n");
+	print_all_registers(dev);
+
+	retval = device_register(&dev->gadget.dev);
+	if (retval)
+		goto error;
+
+	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
+	if (retval)
+		goto error;
+
+	retval = device_create_file(&pdev->dev, &dev_attr_langwell_udc);
+	if (retval)
+		goto error;
+
+	retval = device_create_file(&pdev->dev, &dev_attr_remote_wakeup);
+	if (retval)
+		goto error_attr1;
+
+	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return 0;
+
+error_attr1:
+	device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
+error:
+	if (dev) {
+		dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+		langwell_udc_remove(pdev);
+	}
+
+	return retval;
+}
+
+
+/* device controller suspend */
+static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct langwell_udc	*dev = pci_get_drvdata(pdev);
+
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	usb_del_gadget_udc(&dev->gadget);
+	/* disable interrupt and set controller to stop state */
+	langwell_udc_stop(dev);
+
+	/* disable IRQ handler */
+	if (dev->got_irq)
+		free_irq(pdev->irq, dev);
+	dev->got_irq = 0;
+
+	/* save PCI state */
+	pci_save_state(pdev);
+
+	spin_lock_irq(&dev->lock);
+	/* stop all usb activities */
+	stop_activity(dev);
+	spin_unlock_irq(&dev->lock);
+
+	/* free dTD dma_pool and dQH */
+	if (dev->dtd_pool)
+		dma_pool_destroy(dev->dtd_pool);
+
+	if (dev->ep_dqh)
+		dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
+			dev->ep_dqh, dev->ep_dqh_dma);
+
+	/* release SRAM caching */
+	if (dev->has_sram && dev->got_sram)
+		sram_deinit(dev);
+
+	/* set device power state */
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	/* enter PHY low power suspend */
+	if (dev->pdev->device != 0x0829)
+		langwell_phy_low_power(dev, 1);
+
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* device controller resume */
+static int langwell_udc_resume(struct pci_dev *pdev)
+{
+	struct langwell_udc	*dev = pci_get_drvdata(pdev);
+	size_t			size;
+
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* exit PHY low power suspend */
+	if (dev->pdev->device != 0x0829)
+		langwell_phy_low_power(dev, 0);
+
+	/* set device D0 power state */
+	pci_set_power_state(pdev, PCI_D0);
+
+	/* enable SRAM caching if detected */
+	if (dev->has_sram && !dev->got_sram)
+		sram_init(dev);
+
+	/* allocate device dQH memory */
+	size = dev->ep_max * sizeof(struct langwell_dqh);
+	dev_vdbg(&dev->pdev->dev, "orig size = %zd\n", size);
+	if (size < DQH_ALIGNMENT)
+		size = DQH_ALIGNMENT;
+	else if ((size % DQH_ALIGNMENT) != 0) {
+		size += DQH_ALIGNMENT + 1;
+		size &= ~(DQH_ALIGNMENT - 1);
+	}
+	dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
+					&dev->ep_dqh_dma, GFP_KERNEL);
+	if (!dev->ep_dqh) {
+		dev_err(&dev->pdev->dev, "allocate dQH memory failed\n");
+		return -ENOMEM;
+	}
+	dev->ep_dqh_size = size;
+	dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %zd\n", dev->ep_dqh_size);
+
+	/* create dTD dma_pool resource */
+	dev->dtd_pool = dma_pool_create("langwell_dtd",
+			&dev->pdev->dev,
+			sizeof(struct langwell_dtd),
+			DTD_ALIGNMENT,
+			DMA_BOUNDARY);
+
+	if (!dev->dtd_pool)
+		return -ENOMEM;
+
+	/* restore PCI state */
+	pci_restore_state(pdev);
+
+	/* enable IRQ handler */
+	if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
+				driver_name, dev) != 0) {
+		dev_err(&dev->pdev->dev, "request interrupt %d failed\n",
+				pdev->irq);
+		return -EBUSY;
+	}
+	dev->got_irq = 1;
+
+	/* reset and start controller to run state */
+	if (dev->stopped) {
+		/* reset device controller */
+		langwell_udc_reset(dev);
+
+		/* reset ep0 dQH and endptctrl */
+		ep0_reset(dev);
+
+		/* start device if gadget is loaded */
+		if (dev->driver)
+			langwell_udc_start(dev);
+	}
+
+	/* reset USB status */
+	dev->usb_state = USB_STATE_ATTACHED;
+	dev->ep0_state = WAIT_FOR_SETUP;
+	dev->ep0_dir = USB_DIR_OUT;
+
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* pci driver shutdown */
+static void langwell_udc_shutdown(struct pci_dev *pdev)
+{
+	struct langwell_udc	*dev = pci_get_drvdata(pdev);
+	u32			usbmode;
+
+	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+	/* reset controller mode to IDLE */
+	usbmode = readl(&dev->op_regs->usbmode);
+	dev_dbg(&dev->pdev->dev, "usbmode = 0x%08x\n", usbmode);
+	usbmode &= (~3 | MODE_IDLE);
+	writel(usbmode, &dev->op_regs->usbmode);
+
+	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct pci_device_id pci_ids[] = { {
+	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+	.class_mask =	~0,
+	.vendor =	0x8086,
+	.device =	0x0811,
+	.subvendor =	PCI_ANY_ID,
+	.subdevice =	PCI_ANY_ID,
+}, { /* end: all zeroes */ }
+};
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+
+static struct pci_driver langwell_pci_driver = {
+	.name =		(char *) driver_name,
+	.id_table =	pci_ids,
+
+	.probe =	langwell_udc_probe,
+	.remove =	langwell_udc_remove,
+
+	/* device controller suspend/resume */
+	.suspend =	langwell_udc_suspend,
+	.resume =	langwell_udc_resume,
+
+	.shutdown =	langwell_udc_shutdown,
+};
+
+
+static int __init init(void)
+{
+	return pci_register_driver(&langwell_pci_driver);
+}
+module_init(init);
+
+
+static void __exit cleanup(void)
+{
+	pci_unregister_driver(&langwell_pci_driver);
+}
+module_exit(cleanup);
+
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Xiaochen Shen <xiaochen.shen@intel.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/langwell_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/langwell_udc.h
new file mode 100644
index 0000000..8c8087a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/langwell_udc.h
@@ -0,0 +1,224 @@
+/*
+ * Intel Langwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/usb/langwell_udc.h>
+
+/*-------------------------------------------------------------------------*/
+
+/* driver data structures and utilities */
+
+/*
+ * dTD: Device Endpoint Transfer Descriptor
+ * describes to the device controller the location and quantity of
+ * data to be sent/received for a given transfer
+ */
+struct langwell_dtd {
+	u32	dtd_next;
+/* bits 31:5, next transfer element pointer */
+#define	DTD_NEXT(d)	(((d)>>5)&0x7ffffff)
+#define	DTD_NEXT_MASK	(0x7ffffff << 5)
+/* terminate */
+#define	DTD_TERM	BIT(0)
+	/* bits 7:0, execution back states */
+	u32	dtd_status:8;
+#define	DTD_STATUS(d)	(((d)>>0)&0xff)
+#define	DTD_STS_ACTIVE	BIT(7)	/* active */
+#define	DTD_STS_HALTED	BIT(6)	/* halted */
+#define	DTD_STS_DBE	BIT(5)	/* data buffer error */
+#define	DTD_STS_TRE	BIT(3)	/* transaction error  */
+	/* bits 9:8 */
+	u32	dtd_res0:2;
+	/* bits 11:10, multiplier override */
+	u32	dtd_multo:2;
+#define	DTD_MULTO	(BIT(11) | BIT(10))
+	/* bits 14:12 */
+	u32	dtd_res1:3;
+	/* bit 15, interrupt on complete */
+	u32	dtd_ioc:1;
+#define	DTD_IOC		BIT(15)
+	/* bits 30:16, total bytes */
+	u32	dtd_total:15;
+#define	DTD_TOTAL(d)	(((d)>>16)&0x7fff)
+#define	DTD_MAX_TRANSFER_LENGTH	0x4000
+	/* bit 31 */
+	u32	dtd_res2:1;
+	/* dTD buffer pointer page 0 to 4 */
+	u32	dtd_buf[5];
+#define	DTD_OFFSET_MASK	0xfff
+/* bits 31:12, buffer pointer */
+#define	DTD_BUFFER(d)	(((d)>>12)&0xfffff)
+/* bits 11:0, current offset */
+#define	DTD_C_OFFSET(d)	(((d)>>0)&0xfff)
+/* bits 10:0, frame number */
+#define	DTD_FRAME(d)	(((d)>>0)&0x7ff)
+
+	/* driver-private parts */
+
+	/* dtd dma address */
+	dma_addr_t		dtd_dma;
+	/* next dtd virtual address */
+	struct langwell_dtd	*next_dtd_virt;
+};
+
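+/*
+ * Sizing example: each dTD moves at most DTD_MAX_TRANSFER_LENGTH (0x4000,
+ * i.e. 16 KiB), so a 40 KiB request needs at least three chained dTDs
+ * (16 + 16 + 8 KiB); a request's dTD chain is linked through dtd_next
+ * (hardware view) and next_dtd_virt (driver view).
+ */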
+
+/*
+ * dQH: Device Endpoint Queue Head
+ * describes where all transfers are managed
+ * 48-byte data structure, aligned on 64-byte boundary
+ *
+ * These are associated with the dTD structure
+ */
+struct langwell_dqh {
+	/* endpoint capabilities and characteristics */
+	u32	dqh_res0:15;	/* bits 14:0 */
+	u32	dqh_ios:1;	/* bit 15, interrupt on setup */
+#define	DQH_IOS		BIT(15)
+	u32	dqh_mpl:11;	/* bits 26:16, maximum packet length */
+#define	DQH_MPL		(0x7ff << 16)
+	u32	dqh_res1:2;	/* bits 28:27 */
+	u32	dqh_zlt:1;	/* bit 29, zero length termination */
+#define	DQH_ZLT		BIT(29)
+	u32	dqh_mult:2;	/* bits 31:30 */
+#define	DQH_MULT	(BIT(30) | BIT(31))
+
+	/* current dTD pointer */
+	u32	dqh_current;	/* locate the transfer in progress */
+#define DQH_C_DTD(e)	\
+	(((e)>>5)&0x7ffffff)	/* bits 31:5, current dTD pointer */
+
+	/* transfer overlay, hardware parts of a struct langwell_dtd */
+	u32	dtd_next;
+	u32	dtd_status:8;	/* bits 7:0, execution back states */
+	u32	dtd_res0:2;	/* bits 9:8 */
+	u32	dtd_multo:2;	/* bits 11:10, multiplier override */
+	u32	dtd_res1:3;	/* bits 14:12 */
+	u32	dtd_ioc:1;	/* bit 15, interrupt on complete */
+	u32	dtd_total:15;	/* bits 30:16, total bytes */
+	u32	dtd_res2:1;	/* bit 31 */
+	u32	dtd_buf[5];	/* dTD buffer pointer page 0 to 4 */
+
+	u32	dqh_res2;
+	struct usb_ctrlrequest	dqh_setup;	/* setup packet buffer */
+} __attribute__ ((aligned(64)));
+
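+/*
+ * Indexing sketch (matches the "dir = index % 2" convention used in
+ * langwell_udc.c): dQH slots alternate OUT/IN per endpoint number, so
+ * ep0-OUT sits at ep_dqh[0], ep0-IN at [1], ep1-OUT at [2], ep1-IN at
+ * [3], and so on, up to ep_max entries.
+ */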
+
+/* endpoint data structure */
+struct langwell_ep {
+	struct usb_ep		ep;
+	dma_addr_t		dma;
+	struct langwell_udc	*dev;
+	unsigned long		irqs;
+	struct list_head	queue;
+	struct langwell_dqh	*dqh;
+	const struct usb_endpoint_descriptor	*desc;
+	char			name[14];
+	unsigned		stopped:1,
+				ep_type:2,
+				ep_num:8;
+};
+
+
+/* request data structure */
+struct langwell_request {
+	struct usb_request	req;
+	struct langwell_dtd	*dtd, *head, *tail;
+	struct langwell_ep	*ep;
+	dma_addr_t		dtd_dma;
+	struct list_head	queue;
+	unsigned		dtd_count;
+	unsigned		mapped:1;
+};
+
+
+/* ep0 transfer state */
+enum ep0_state {
+	WAIT_FOR_SETUP,
+	DATA_STATE_XMIT,
+	DATA_STATE_NEED_ZLP,
+	WAIT_FOR_OUT_STATUS,
+	DATA_STATE_RECV,
+};
+
+
+/* device suspend state */
+enum lpm_state {
+	LPM_L0,	/* on */
+	LPM_L1,	/* LPM L1 sleep */
+	LPM_L2,	/* suspend */
+	LPM_L3,	/* off */
+};
+
+
+/* device data structure */
+struct langwell_udc {
+	/* each pci device provides one gadget, several endpoints */
+	struct usb_gadget	gadget;
+	spinlock_t		lock;	/* device lock */
+	struct langwell_ep	*ep;
+	struct usb_gadget_driver	*driver;
+	struct usb_phy		*transceiver;
+	u8			dev_addr;
+	u32			usb_state;
+	u32			resume_state;
+	u32			bus_reset;
+	enum lpm_state		lpm_state;
+	enum ep0_state		ep0_state;
+	u32			ep0_dir;
+	u16			dciversion;
+	unsigned		ep_max;
+	unsigned		devcap:1,
+				enabled:1,
+				region:1,
+				got_irq:1,
+				powered:1,
+				remote_wakeup:1,
+				rate:1,
+				is_reset:1,
+				softconnected:1,
+				vbus_active:1,
+				suspended:1,
+				stopped:1,
+				lpm:1,		/* LPM capability */
+				has_sram:1,	/* SRAM caching */
+				got_sram:1;
+
+	/* pci state used to access those endpoints */
+	struct pci_dev		*pdev;
+
+	/* Langwell otg transceiver */
+	struct langwell_otg	*lotg;
+
+	/* control registers */
+	struct langwell_cap_regs	__iomem	*cap_regs;
+	struct langwell_op_regs		__iomem	*op_regs;
+
+	struct usb_ctrlrequest	local_setup_buff;
+	struct langwell_dqh	*ep_dqh;
+	size_t			ep_dqh_size;
+	dma_addr_t		ep_dqh_dma;
+
+	/* ep0 status request */
+	struct langwell_request	*status_req;
+
+	/* dma pool */
+	struct dma_pool		*dtd_pool;
+
+	/* make sure release() is done */
+	struct completion	*done;
+
+	/* for private SRAM caching */
+	unsigned int		sram_addr;
+	unsigned int		sram_size;
+
+	/* device status data for get_status request */
+	u16			dev_status;
+};
+
+#define gadget_to_langwell(g)	container_of((g), struct langwell_udc, gadget)
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/m66592-udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/m66592-udc.c
new file mode 100644
index 0000000..3608b3b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/m66592-udc.c
@@ -0,0 +1,1773 @@
+/*
+ * M66592 UDC (USB gadget)
+ *
+ * Copyright (C) 2006-2007 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "m66592-udc.h"
+
+MODULE_DESCRIPTION("M66592 USB gadget driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_ALIAS("platform:m66592_udc");
+
+#define DRIVER_VERSION	"21 July 2009"
+
+static const char udc_name[] = "m66592_udc";
+static const char *m66592_ep_name[] = {
+	"ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7"
+};
+
+static void disable_controller(struct m66592 *m66592);
+static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req);
+static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req);
+static int m66592_queue(struct usb_ep *_ep, struct usb_request *_req,
+			gfp_t gfp_flags);
+
+static void transfer_complete(struct m66592_ep *ep,
+		struct m66592_request *req, int status);
+
+/*-------------------------------------------------------------------------*/
+static inline u16 get_usb_speed(struct m66592 *m66592)
+{
+	return (m66592_read(m66592, M66592_DVSTCTR) & M66592_RHST);
+}
+
+static void enable_pipe_irq(struct m66592 *m66592, u16 pipenum,
+		unsigned long reg)
+{
+	u16 tmp;
+
+	tmp = m66592_read(m66592, M66592_INTENB0);
+	m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE,
+			M66592_INTENB0);
+	m66592_bset(m66592, (1 << pipenum), reg);
+	m66592_write(m66592, tmp, M66592_INTENB0);
+}
+
+static void disable_pipe_irq(struct m66592 *m66592, u16 pipenum,
+		unsigned long reg)
+{
+	u16 tmp;
+
+	tmp = m66592_read(m66592, M66592_INTENB0);
+	m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE,
+			M66592_INTENB0);
+	m66592_bclr(m66592, (1 << pipenum), reg);
+	m66592_write(m66592, tmp, M66592_INTENB0);
+}
+
+static void m66592_usb_connect(struct m66592 *m66592)
+{
+	m66592_bset(m66592, M66592_CTRE, M66592_INTENB0);
+	m66592_bset(m66592, M66592_WDST | M66592_RDST | M66592_CMPL,
+			M66592_INTENB0);
+	m66592_bset(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0);
+
+	m66592_bset(m66592, M66592_DPRPU, M66592_SYSCFG);
+}
+
+static void m66592_usb_disconnect(struct m66592 *m66592)
+__releases(m66592->lock)
+__acquires(m66592->lock)
+{
+	m66592_bclr(m66592, M66592_CTRE, M66592_INTENB0);
+	m66592_bclr(m66592, M66592_WDST | M66592_RDST | M66592_CMPL,
+			M66592_INTENB0);
+	m66592_bclr(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0);
+	m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
+
+	m66592->gadget.speed = USB_SPEED_UNKNOWN;
+	spin_unlock(&m66592->lock);
+	m66592->driver->disconnect(&m66592->gadget);
+	spin_lock(&m66592->lock);
+
+	disable_controller(m66592);
+	INIT_LIST_HEAD(&m66592->ep[0].queue);
+}
+
+static inline u16 control_reg_get_pid(struct m66592 *m66592, u16 pipenum)
+{
+	u16 pid = 0;
+	unsigned long offset;
+
+	if (pipenum == 0)
+		pid = m66592_read(m66592, M66592_DCPCTR) & M66592_PID;
+	else if (pipenum < M66592_MAX_NUM_PIPE) {
+		offset = get_pipectr_addr(pipenum);
+		pid = m66592_read(m66592, offset) & M66592_PID;
+	} else
+		pr_err("unexpected pipe num (%d)\n", pipenum);
+
+	return pid;
+}
+
+static inline void control_reg_set_pid(struct m66592 *m66592, u16 pipenum,
+		u16 pid)
+{
+	unsigned long offset;
+
+	if (pipenum == 0)
+		m66592_mdfy(m66592, pid, M66592_PID, M66592_DCPCTR);
+	else if (pipenum < M66592_MAX_NUM_PIPE) {
+		offset = get_pipectr_addr(pipenum);
+		m66592_mdfy(m66592, pid, M66592_PID, offset);
+	} else
+		pr_err("unexpected pipe num (%d)\n", pipenum);
+}
+
+static inline void pipe_start(struct m66592 *m66592, u16 pipenum)
+{
+	control_reg_set_pid(m66592, pipenum, M66592_PID_BUF);
+}
+
+static inline void pipe_stop(struct m66592 *m66592, u16 pipenum)
+{
+	control_reg_set_pid(m66592, pipenum, M66592_PID_NAK);
+}
+
+static inline void pipe_stall(struct m66592 *m66592, u16 pipenum)
+{
+	control_reg_set_pid(m66592, pipenum, M66592_PID_STALL);
+}
+
+static inline u16 control_reg_get(struct m66592 *m66592, u16 pipenum)
+{
+	u16 ret = 0;
+	unsigned long offset;
+
+	if (pipenum == 0)
+		ret = m66592_read(m66592, M66592_DCPCTR);
+	else if (pipenum < M66592_MAX_NUM_PIPE) {
+		offset = get_pipectr_addr(pipenum);
+		ret = m66592_read(m66592, offset);
+	} else
+		pr_err("unexpected pipe num (%d)\n", pipenum);
+
+	return ret;
+}
+
+static inline void control_reg_sqclr(struct m66592 *m66592, u16 pipenum)
+{
+	unsigned long offset;
+
+	pipe_stop(m66592, pipenum);
+
+	if (pipenum == 0)
+		m66592_bset(m66592, M66592_SQCLR, M66592_DCPCTR);
+	else if (pipenum < M66592_MAX_NUM_PIPE) {
+		offset = get_pipectr_addr(pipenum);
+		m66592_bset(m66592, M66592_SQCLR, offset);
+	} else
+		pr_err("unexpected pipe num (%d)\n", pipenum);
+}
+
+static inline int get_buffer_size(struct m66592 *m66592, u16 pipenum)
+{
+	u16 tmp;
+	int size;
+
+	if (pipenum == 0) {
+		tmp = m66592_read(m66592, M66592_DCPCFG);
+		if ((tmp & M66592_CNTMD) != 0)
+			size = 256;
+		else {
+			tmp = m66592_read(m66592, M66592_DCPMAXP);
+			size = tmp & M66592_MAXP;
+		}
+	} else {
+		m66592_write(m66592, pipenum, M66592_PIPESEL);
+		tmp = m66592_read(m66592, M66592_PIPECFG);
+		if ((tmp & M66592_CNTMD) != 0) {
+			tmp = m66592_read(m66592, M66592_PIPEBUF);
+			size = ((tmp >> 10) + 1) * 64;
+		} else {
+			tmp = m66592_read(m66592, M66592_PIPEMAXP);
+			size = tmp & M66592_MXPS;
+		}
+	}
+
+	return size;
+}
+
+static inline void pipe_change(struct m66592 *m66592, u16 pipenum)
+{
+	struct m66592_ep *ep = m66592->pipenum2ep[pipenum];
+	unsigned short mbw;
+
+	if (ep->use_dma)
+		return;
+
+	m66592_mdfy(m66592, pipenum, M66592_CURPIPE, ep->fifosel);
+
+	ndelay(450);
+
+	if (m66592->pdata->on_chip)
+		mbw = M66592_MBW_32;
+	else
+		mbw = M66592_MBW_16;
+
+	m66592_bset(m66592, mbw, ep->fifosel);
+}
+
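+/*
+ * Program PIPESEL/PIPECFG/PIPEBUF/PIPEMAXP/PIPEPERI for one pipe.
+ * Interrupt pipes get a fixed single 64-byte buffer, bulk pipes a
+ * 512-byte double buffer and isochronous pipes a 512-byte buffer, all
+ * carved out of the area above M66592_BASE_BUFNUM in 16-block steps.
+ * Returns -ENOMEM if the allocation would run past M66592_MAX_BUFNUM.
+ */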
+static int pipe_buffer_setting(struct m66592 *m66592,
+		struct m66592_pipe_info *info)
+{
+	u16 bufnum = 0, buf_bsize = 0;
+	u16 pipecfg = 0;
+
+	if (info->pipe == 0)
+		return -EINVAL;
+
+	m66592_write(m66592, info->pipe, M66592_PIPESEL);
+
+	if (info->dir_in)
+		pipecfg |= M66592_DIR;
+	pipecfg |= info->type;
+	pipecfg |= info->epnum;
+	switch (info->type) {
+	case M66592_INT:
+		bufnum = 4 + (info->pipe - M66592_BASE_PIPENUM_INT);
+		buf_bsize = 0;
+		break;
+	case M66592_BULK:
+		/* isochronous pipes may be used as bulk pipes */
+		if (info->pipe >= M66592_BASE_PIPENUM_BULK)
+			bufnum = info->pipe - M66592_BASE_PIPENUM_BULK;
+		else
+			bufnum = info->pipe - M66592_BASE_PIPENUM_ISOC;
+
+		bufnum = M66592_BASE_BUFNUM + (bufnum * 16);
+		buf_bsize = 7;
+		pipecfg |= M66592_DBLB;
+		if (!info->dir_in)
+			pipecfg |= M66592_SHTNAK;
+		break;
+	case M66592_ISO:
+		bufnum = M66592_BASE_BUFNUM +
+			 (info->pipe - M66592_BASE_PIPENUM_ISOC) * 16;
+		buf_bsize = 7;
+		break;
+	}
+
+	if (buf_bsize && ((bufnum + 16) >= M66592_MAX_BUFNUM)) {
+		pr_err("m66592 pipe memory is insufficient\n");
+		return -ENOMEM;
+	}
+
+	m66592_write(m66592, pipecfg, M66592_PIPECFG);
+	m66592_write(m66592, (buf_bsize << 10) | (bufnum), M66592_PIPEBUF);
+	m66592_write(m66592, info->maxpacket, M66592_PIPEMAXP);
+	if (info->interval)
+		info->interval--;
+	m66592_write(m66592, info->interval, M66592_PIPEPERI);
+
+	return 0;
+}
+
+static void pipe_buffer_release(struct m66592 *m66592,
+				struct m66592_pipe_info *info)
+{
+	if (info->pipe == 0)
+		return;
+
+	if (is_bulk_pipe(info->pipe)) {
+		m66592->bulk--;
+	} else if (is_interrupt_pipe(info->pipe))
+		m66592->interrupt--;
+	else if (is_isoc_pipe(info->pipe)) {
+		m66592->isochronous--;
+		if (info->type == M66592_BULK)
+			m66592->bulk--;
+	} else
+		pr_err("ep_release: unexpected pipenum (%d)\n",
+				info->pipe);
+}
+
+static void pipe_initialize(struct m66592_ep *ep)
+{
+	struct m66592 *m66592 = ep->m66592;
+	unsigned short mbw;
+
+	m66592_mdfy(m66592, 0, M66592_CURPIPE, ep->fifosel);
+
+	m66592_write(m66592, M66592_ACLRM, ep->pipectr);
+	m66592_write(m66592, 0, ep->pipectr);
+	m66592_write(m66592, M66592_SQCLR, ep->pipectr);
+	if (ep->use_dma) {
+		m66592_mdfy(m66592, ep->pipenum, M66592_CURPIPE, ep->fifosel);
+
+		ndelay(450);
+
+		if (m66592->pdata->on_chip)
+			mbw = M66592_MBW_32;
+		else
+			mbw = M66592_MBW_16;
+
+		m66592_bset(m66592, mbw, ep->fifosel);
+	}
+}
+
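+/*
+ * Bind an endpoint to its pipe and choose a FIFO port: the first
+ * DMA-capable pipe gets D0FIFO, a second one gets D1FIFO (external chip
+ * only), and everything else falls back to the shared CFIFO.
+ */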
+static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep,
+		const struct usb_endpoint_descriptor *desc,
+		u16 pipenum, int dma)
+{
+	if ((pipenum != 0) && dma) {
+		if (m66592->num_dma == 0) {
+			m66592->num_dma++;
+			ep->use_dma = 1;
+			ep->fifoaddr = M66592_D0FIFO;
+			ep->fifosel = M66592_D0FIFOSEL;
+			ep->fifoctr = M66592_D0FIFOCTR;
+			ep->fifotrn = M66592_D0FIFOTRN;
+		} else if (!m66592->pdata->on_chip && m66592->num_dma == 1) {
+			m66592->num_dma++;
+			ep->use_dma = 1;
+			ep->fifoaddr = M66592_D1FIFO;
+			ep->fifosel = M66592_D1FIFOSEL;
+			ep->fifoctr = M66592_D1FIFOCTR;
+			ep->fifotrn = M66592_D1FIFOTRN;
+		} else {
+			ep->use_dma = 0;
+			ep->fifoaddr = M66592_CFIFO;
+			ep->fifosel = M66592_CFIFOSEL;
+			ep->fifoctr = M66592_CFIFOCTR;
+			ep->fifotrn = 0;
+		}
+	} else {
+		ep->use_dma = 0;
+		ep->fifoaddr = M66592_CFIFO;
+		ep->fifosel = M66592_CFIFOSEL;
+		ep->fifoctr = M66592_CFIFOCTR;
+		ep->fifotrn = 0;
+	}
+
+	ep->pipectr = get_pipectr_addr(pipenum);
+	ep->pipenum = pipenum;
+	ep->ep.maxpacket = usb_endpoint_maxp(desc);
+	m66592->pipenum2ep[pipenum] = ep;
+	m66592->epaddr2ep[desc->bEndpointAddress&USB_ENDPOINT_NUMBER_MASK] = ep;
+	INIT_LIST_HEAD(&ep->queue);
+}
+
+static void m66592_ep_release(struct m66592_ep *ep)
+{
+	struct m66592 *m66592 = ep->m66592;
+	u16 pipenum = ep->pipenum;
+
+	if (pipenum == 0)
+		return;
+
+	if (ep->use_dma)
+		m66592->num_dma--;
+	ep->pipenum = 0;
+	ep->busy = 0;
+	ep->use_dma = 0;
+}
+
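+/*
+ * Pick a hardware pipe for the given endpoint descriptor.  Bulk endpoints
+ * use the bulk pipes first and borrow isochronous pipes once those are
+ * exhausted; both counters are updated so free_pipe_config() can undo it.
+ */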
+static int alloc_pipe_config(struct m66592_ep *ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct m66592 *m66592 = ep->m66592;
+	struct m66592_pipe_info info;
+	int dma = 0;
+	int *counter;
+	int ret;
+
+	ep->desc = desc;
+
+	BUG_ON(ep->pipenum);
+
+	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_BULK:
+		if (m66592->bulk >= M66592_MAX_NUM_BULK) {
+			if (m66592->isochronous >= M66592_MAX_NUM_ISOC) {
+				pr_err("bulk pipe is insufficient\n");
+				return -ENODEV;
+			} else {
+				info.pipe = M66592_BASE_PIPENUM_ISOC
+						+ m66592->isochronous;
+				counter = &m66592->isochronous;
+			}
+		} else {
+			info.pipe = M66592_BASE_PIPENUM_BULK + m66592->bulk;
+			counter = &m66592->bulk;
+		}
+		info.type = M66592_BULK;
+		dma = 1;
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		if (m66592->interrupt >= M66592_MAX_NUM_INT) {
+			pr_err("interrupt pipe is insufficient\n");
+			return -ENODEV;
+		}
+		info.pipe = M66592_BASE_PIPENUM_INT + m66592->interrupt;
+		info.type = M66592_INT;
+		counter = &m66592->interrupt;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (m66592->isochronous >= M66592_MAX_NUM_ISOC) {
+			pr_err("isochronous pipe is insufficient\n");
+			return -ENODEV;
+		}
+		info.pipe = M66592_BASE_PIPENUM_ISOC + m66592->isochronous;
+		info.type = M66592_ISO;
+		counter = &m66592->isochronous;
+		break;
+	default:
+		pr_err("unexpected transfer type\n");
+		return -EINVAL;
+	}
+	ep->type = info.type;
+
+	info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+	info.maxpacket = usb_endpoint_maxp(desc);
+	info.interval = desc->bInterval;
+	if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+		info.dir_in = 1;
+	else
+		info.dir_in = 0;
+
+	ret = pipe_buffer_setting(m66592, &info);
+	if (ret < 0) {
+		pr_err("pipe_buffer_setting failed\n");
+		return ret;
+	}
+
+	(*counter)++;
+	if ((counter == &m66592->isochronous) && info.type == M66592_BULK)
+		m66592->bulk++;
+
+	m66592_ep_setting(m66592, ep, desc, info.pipe, dma);
+	pipe_initialize(ep);
+
+	return 0;
+}
+
+static int free_pipe_config(struct m66592_ep *ep)
+{
+	struct m66592 *m66592 = ep->m66592;
+	struct m66592_pipe_info info;
+
+	info.pipe = ep->pipenum;
+	info.type = ep->type;
+	pipe_buffer_release(m66592, &info);
+	m66592_ep_release(ep);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+static void pipe_irq_enable(struct m66592 *m66592, u16 pipenum)
+{
+	enable_irq_ready(m66592, pipenum);
+	enable_irq_nrdy(m66592, pipenum);
+}
+
+static void pipe_irq_disable(struct m66592 *m66592, u16 pipenum)
+{
+	disable_irq_ready(m66592, pipenum);
+	disable_irq_nrdy(m66592, pipenum);
+}
+
+/* if ccpl is nonzero, the gadget driver's complete function is not called */
+static void control_end(struct m66592 *m66592, unsigned ccpl)
+{
+	m66592->ep[0].internal_ccpl = ccpl;
+	pipe_start(m66592, 0);
+	m66592_bset(m66592, M66592_CCPL, M66592_DCPCTR);
+}
+
+static void start_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
+{
+	struct m66592 *m66592 = ep->m66592;
+
+	pipe_change(m66592, ep->pipenum);
+	m66592_mdfy(m66592, M66592_ISEL | M66592_PIPE0,
+			(M66592_ISEL | M66592_CURPIPE),
+			M66592_CFIFOSEL);
+	m66592_write(m66592, M66592_BCLR, ep->fifoctr);
+	if (req->req.length == 0) {
+		m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
+		pipe_start(m66592, 0);
+		transfer_complete(ep, req, 0);
+	} else {
+		m66592_write(m66592, ~M66592_BEMP0, M66592_BEMPSTS);
+		irq_ep0_write(ep, req);
+	}
+}
+
+static void start_packet_write(struct m66592_ep *ep, struct m66592_request *req)
+{
+	struct m66592 *m66592 = ep->m66592;
+	u16 tmp;
+
+	pipe_change(m66592, ep->pipenum);
+	disable_irq_empty(m66592, ep->pipenum);
+	pipe_start(m66592, ep->pipenum);
+
+	tmp = m66592_read(m66592, ep->fifoctr);
+	if (unlikely((tmp & M66592_FRDY) == 0))
+		pipe_irq_enable(m66592, ep->pipenum);
+	else
+		irq_packet_write(ep, req);
+}
+
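+/*
+ * Start an OUT transfer.  On a DMA pipe the transaction counter (FIFOTRN)
+ * is preloaded with the number of max-packet transactions expected for
+ * this request, presumably so the hardware stops accepting data once the
+ * request is full; pipe interrupts are then enabled in either case.
+ */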
+static void start_packet_read(struct m66592_ep *ep, struct m66592_request *req)
+{
+	struct m66592 *m66592 = ep->m66592;
+	u16 pipenum = ep->pipenum;
+
+	if (ep->pipenum == 0) {
+		m66592_mdfy(m66592, M66592_PIPE0,
+				(M66592_ISEL | M66592_CURPIPE),
+				M66592_CFIFOSEL);
+		m66592_write(m66592, M66592_BCLR, ep->fifoctr);
+		pipe_start(m66592, pipenum);
+		pipe_irq_enable(m66592, pipenum);
+	} else {
+		if (ep->use_dma) {
+			m66592_bset(m66592, M66592_TRCLR, ep->fifosel);
+			pipe_change(m66592, pipenum);
+			m66592_bset(m66592, M66592_TRENB, ep->fifosel);
+			m66592_write(m66592,
+				(req->req.length + ep->ep.maxpacket - 1)
+					/ ep->ep.maxpacket,
+				ep->fifotrn);
+		}
+		pipe_start(m66592, pipenum);	/* trigger once */
+		pipe_irq_enable(m66592, pipenum);
+	}
+}
+
+static void start_packet(struct m66592_ep *ep, struct m66592_request *req)
+{
+	if (ep->desc->bEndpointAddress & USB_DIR_IN)
+		start_packet_write(ep, req);
+	else
+		start_packet_read(ep, req);
+}
+
+static void start_ep0(struct m66592_ep *ep, struct m66592_request *req)
+{
+	u16 ctsq;
+
+	ctsq = m66592_read(ep->m66592, M66592_INTSTS0) & M66592_CTSQ;
+
+	switch (ctsq) {
+	case M66592_CS_RDDS:
+		start_ep0_write(ep, req);
+		break;
+	case M66592_CS_WRDS:
+		start_packet_read(ep, req);
+		break;
+
+	case M66592_CS_WRND:
+		control_end(ep->m66592, 0);
+		break;
+	default:
+		pr_err("start_ep0: unexpected ctsq(%x)\n", ctsq);
+		break;
+	}
+}
+
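+/*
+ * Bring the controller up.  The on-chip variant (e.g. SH7722) only needs
+ * the endianness, high-speed enable and the workaround below; the
+ * external chip additionally needs crystal selection, VIF drive level,
+ * IRQ sense and the XCKE/RCKE/PLLC/SCKE clock start-up sequence.
+ */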
+static void init_controller(struct m66592 *m66592)
+{
+	unsigned int endian;
+
+	if (m66592->pdata->on_chip) {
+		if (m66592->pdata->endian)
+			endian = 0; /* big endian */
+		else
+			endian = M66592_LITTLE; /* little endian */
+
+		m66592_bset(m66592, M66592_HSE, M66592_SYSCFG);	/* High spd */
+		m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG);
+		m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
+		m66592_bset(m66592, M66592_USBE, M66592_SYSCFG);
+
+		/* This is a workaround for SH7722 2nd cut */
+		m66592_bset(m66592, 0x8000, M66592_DVSTCTR);
+		m66592_bset(m66592, 0x1000, M66592_TESTMODE);
+		m66592_bclr(m66592, 0x8000, M66592_DVSTCTR);
+
+		m66592_bset(m66592, M66592_INTL, M66592_INTENB1);
+
+		m66592_write(m66592, 0, M66592_CFBCFG);
+		m66592_write(m66592, 0, M66592_D0FBCFG);
+		m66592_bset(m66592, endian, M66592_CFBCFG);
+		m66592_bset(m66592, endian, M66592_D0FBCFG);
+	} else {
+		unsigned int clock, vif, irq_sense;
+
+		if (m66592->pdata->endian)
+			endian = M66592_BIGEND; /* big endian */
+		else
+			endian = 0; /* little endian */
+
+		if (m66592->pdata->vif)
+			vif = M66592_LDRV; /* 3.3v */
+		else
+			vif = 0; /* 1.5v */
+
+		switch (m66592->pdata->xtal) {
+		case M66592_PLATDATA_XTAL_12MHZ:
+			clock = M66592_XTAL12;
+			break;
+		case M66592_PLATDATA_XTAL_24MHZ:
+			clock = M66592_XTAL24;
+			break;
+		case M66592_PLATDATA_XTAL_48MHZ:
+			clock = M66592_XTAL48;
+			break;
+		default:
+			pr_warning("m66592-udc: xtal configuration error\n");
+			clock = 0;
+		}
+
+		switch (m66592->irq_trigger) {
+		case IRQF_TRIGGER_LOW:
+			irq_sense = M66592_INTL;
+			break;
+		case IRQF_TRIGGER_FALLING:
+			irq_sense = 0;
+			break;
+		default:
+			pr_warning("m66592-udc: irq trigger config error\n");
+			irq_sense = 0;
+		}
+
+		m66592_bset(m66592,
+			    (vif & M66592_LDRV) | (endian & M66592_BIGEND),
+			    M66592_PINCFG);
+		m66592_bset(m66592, M66592_HSE, M66592_SYSCFG);	/* High spd */
+		m66592_mdfy(m66592, clock & M66592_XTAL, M66592_XTAL,
+			    M66592_SYSCFG);
+		m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG);
+		m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
+		m66592_bset(m66592, M66592_USBE, M66592_SYSCFG);
+
+		m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG);
+
+		msleep(3);
+
+		m66592_bset(m66592, M66592_RCKE | M66592_PLLC, M66592_SYSCFG);
+
+		msleep(1);
+
+		m66592_bset(m66592, M66592_SCKE, M66592_SYSCFG);
+
+		m66592_bset(m66592, irq_sense & M66592_INTL, M66592_INTENB1);
+		m66592_write(m66592, M66592_BURST | M66592_CPU_ADR_RD_WR,
+			     M66592_DMA0CFG);
+	}
+}
+
+static void disable_controller(struct m66592 *m66592)
+{
+	m66592_bclr(m66592, M66592_UTST, M66592_TESTMODE);
+	if (!m66592->pdata->on_chip) {
+		m66592_bclr(m66592, M66592_SCKE, M66592_SYSCFG);
+		udelay(1);
+		m66592_bclr(m66592, M66592_PLLC, M66592_SYSCFG);
+		udelay(1);
+		m66592_bclr(m66592, M66592_RCKE, M66592_SYSCFG);
+		udelay(1);
+		m66592_bclr(m66592, M66592_XCKE, M66592_SYSCFG);
+	}
+}
+
+static void m66592_start_xclock(struct m66592 *m66592)
+{
+	u16 tmp;
+
+	if (!m66592->pdata->on_chip) {
+		tmp = m66592_read(m66592, M66592_SYSCFG);
+		if (!(tmp & M66592_XCKE))
+			m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG);
+	}
+}
+
+/*-------------------------------------------------------------------------*/
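+/*
+ * Complete the request at the head of the queue.  The controller lock is
+ * dropped around the gadget's ->complete() callback (see the sparse
+ * annotations) and, if more requests are queued, the next one is started
+ * before returning.
+ */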
+static void transfer_complete(struct m66592_ep *ep,
+		struct m66592_request *req, int status)
+__releases(m66592->lock)
+__acquires(m66592->lock)
+{
+	int restart = 0;
+
+	if (unlikely(ep->pipenum == 0)) {
+		if (ep->internal_ccpl) {
+			ep->internal_ccpl = 0;
+			return;
+		}
+	}
+
+	list_del_init(&req->queue);
+	if (ep->m66592->gadget.speed == USB_SPEED_UNKNOWN)
+		req->req.status = -ESHUTDOWN;
+	else
+		req->req.status = status;
+
+	if (!list_empty(&ep->queue))
+		restart = 1;
+
+	spin_unlock(&ep->m66592->lock);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&ep->m66592->lock);
+
+	if (restart) {
+		req = list_entry(ep->queue.next, struct m66592_request, queue);
+		if (ep->desc)
+			start_packet(ep, req);
+	}
+}
+
+static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
+{
+	int i;
+	u16 tmp;
+	unsigned bufsize;
+	size_t size;
+	void *buf;
+	u16 pipenum = ep->pipenum;
+	struct m66592 *m66592 = ep->m66592;
+
+	pipe_change(m66592, pipenum);
+	m66592_bset(m66592, M66592_ISEL, ep->fifosel);
+
+	i = 0;
+	do {
+		tmp = m66592_read(m66592, ep->fifoctr);
+		if (i++ > 100000) {
+			pr_err("pipe0 is busy: possible CPU I/O bus "
+				"conflict, please power off this controller\n");
+			return;
+		}
+		ndelay(1);
+	} while ((tmp & M66592_FRDY) == 0);
+
+	/* prepare parameters */
+	bufsize = get_buffer_size(m66592, pipenum);
+	buf = req->req.buf + req->req.actual;
+	size = min(bufsize, req->req.length - req->req.actual);
+
+	/* write fifo */
+	if (req->req.buf) {
+		if (size > 0)
+			m66592_write_fifo(m66592, ep, buf, size);
+		if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
+			m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
+	}
+
+	/* update parameters */
+	req->req.actual += size;
+
+	/* check transfer finish */
+	if ((!req->req.zero && (req->req.actual == req->req.length))
+			|| (size % ep->ep.maxpacket)
+			|| (size == 0)) {
+		disable_irq_ready(m66592, pipenum);
+		disable_irq_empty(m66592, pipenum);
+	} else {
+		disable_irq_ready(m66592, pipenum);
+		enable_irq_empty(m66592, pipenum);
+	}
+	pipe_start(m66592, pipenum);
+}
+
+static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req)
+{
+	u16 tmp;
+	unsigned bufsize;
+	size_t size;
+	void *buf;
+	u16 pipenum = ep->pipenum;
+	struct m66592 *m66592 = ep->m66592;
+
+	pipe_change(m66592, pipenum);
+	tmp = m66592_read(m66592, ep->fifoctr);
+	if (unlikely((tmp & M66592_FRDY) == 0)) {
+		pipe_stop(m66592, pipenum);
+		pipe_irq_disable(m66592, pipenum);
+		pr_err("write fifo not ready. pipenum=%d\n", pipenum);
+		return;
+	}
+
+	/* prepare parameters */
+	bufsize = get_buffer_size(m66592, pipenum);
+	buf = req->req.buf + req->req.actual;
+	size = min(bufsize, req->req.length - req->req.actual);
+
+	/* write fifo */
+	if (req->req.buf) {
+		m66592_write_fifo(m66592, ep, buf, size);
+		if ((size == 0)
+				|| ((size % ep->ep.maxpacket) != 0)
+				|| ((bufsize != ep->ep.maxpacket)
+					&& (bufsize > size)))
+			m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
+	}
+
+	/* update parameters */
+	req->req.actual += size;
+	/* check transfer finish */
+	if ((!req->req.zero && (req->req.actual == req->req.length))
+			|| (size % ep->ep.maxpacket)
+			|| (size == 0)) {
+		disable_irq_ready(m66592, pipenum);
+		enable_irq_empty(m66592, pipenum);
+	} else {
+		disable_irq_empty(m66592, pipenum);
+		pipe_irq_enable(m66592, pipenum);
+	}
+}
+
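+/*
+ * BRDY handler for OUT pipes: read up to one buffer's worth of data from
+ * the FIFO into the current request.  A short or zero-length packet, or a
+ * filled request, ends the transfer: the pipe is NAKed and the request is
+ * completed (pipe 0 completion is handled by the control-stage code).
+ */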
+static void irq_packet_read(struct m66592_ep *ep, struct m66592_request *req)
+{
+	u16 tmp;
+	int rcv_len, bufsize, req_len;
+	int size;
+	void *buf;
+	u16 pipenum = ep->pipenum;
+	struct m66592 *m66592 = ep->m66592;
+	int finish = 0;
+
+	pipe_change(m66592, pipenum);
+	tmp = m66592_read(m66592, ep->fifoctr);
+	if (unlikely((tmp & M66592_FRDY) == 0)) {
+		req->req.status = -EPIPE;
+		pipe_stop(m66592, pipenum);
+		pipe_irq_disable(m66592, pipenum);
+		pr_err("read fifo not ready\n");
+		return;
+	}
+
+	/* prepare parameters */
+	rcv_len = tmp & M66592_DTLN;
+	bufsize = get_buffer_size(m66592, pipenum);
+
+	buf = req->req.buf + req->req.actual;
+	req_len = req->req.length - req->req.actual;
+	if (rcv_len < bufsize)
+		size = min(rcv_len, req_len);
+	else
+		size = min(bufsize, req_len);
+
+	/* update parameters */
+	req->req.actual += size;
+
+	/* check transfer finish */
+	if ((!req->req.zero && (req->req.actual == req->req.length))
+			|| (size % ep->ep.maxpacket)
+			|| (size == 0)) {
+		pipe_stop(m66592, pipenum);
+		pipe_irq_disable(m66592, pipenum);
+		finish = 1;
+	}
+
+	/* read fifo */
+	if (req->req.buf) {
+		if (size == 0)
+			m66592_write(m66592, M66592_BCLR, ep->fifoctr);
+		else
+			m66592_read_fifo(m66592, ep->fifoaddr, buf, size);
+	}
+
+	if ((ep->pipenum != 0) && finish)
+		transfer_complete(ep, req, 0);
+}
+
+static void irq_pipe_ready(struct m66592 *m66592, u16 status, u16 enb)
+{
+	u16 check;
+	u16 pipenum;
+	struct m66592_ep *ep;
+	struct m66592_request *req;
+
+	if ((status & M66592_BRDY0) && (enb & M66592_BRDY0)) {
+		m66592_write(m66592, ~M66592_BRDY0, M66592_BRDYSTS);
+		m66592_mdfy(m66592, M66592_PIPE0, M66592_CURPIPE,
+				M66592_CFIFOSEL);
+
+		ep = &m66592->ep[0];
+		req = list_entry(ep->queue.next, struct m66592_request, queue);
+		irq_packet_read(ep, req);
+	} else {
+		for (pipenum = 1; pipenum < M66592_MAX_NUM_PIPE; pipenum++) {
+			check = 1 << pipenum;
+			if ((status & check) && (enb & check)) {
+				m66592_write(m66592, ~check, M66592_BRDYSTS);
+				ep = m66592->pipenum2ep[pipenum];
+				req = list_entry(ep->queue.next,
+						 struct m66592_request, queue);
+				if (ep->desc->bEndpointAddress & USB_DIR_IN)
+					irq_packet_write(ep, req);
+				else
+					irq_packet_read(ep, req);
+			}
+		}
+	}
+}
+
+static void irq_pipe_empty(struct m66592 *m66592, u16 status, u16 enb)
+{
+	u16 tmp;
+	u16 check;
+	u16 pipenum;
+	struct m66592_ep *ep;
+	struct m66592_request *req;
+
+	if ((status & M66592_BEMP0) && (enb & M66592_BEMP0)) {
+		m66592_write(m66592, ~M66592_BEMP0, M66592_BEMPSTS);
+
+		ep = &m66592->ep[0];
+		req = list_entry(ep->queue.next, struct m66592_request, queue);
+		irq_ep0_write(ep, req);
+	} else {
+		for (pipenum = 1; pipenum < M66592_MAX_NUM_PIPE; pipenum++) {
+			check = 1 << pipenum;
+			if ((status & check) && (enb & check)) {
+				m66592_write(m66592, ~check, M66592_BEMPSTS);
+				tmp = control_reg_get(m66592, pipenum);
+				if ((tmp & M66592_INBUFM) == 0) {
+					disable_irq_empty(m66592, pipenum);
+					pipe_irq_disable(m66592, pipenum);
+					pipe_stop(m66592, pipenum);
+					ep = m66592->pipenum2ep[pipenum];
+					req = list_entry(ep->queue.next,
+							 struct m66592_request,
+							 queue);
+					if (!list_empty(&ep->queue))
+						transfer_complete(ep, req, 0);
+				}
+			}
+		}
+	}
+}
+
+static void get_status(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
+__releases(m66592->lock)
+__acquires(m66592->lock)
+{
+	struct m66592_ep *ep;
+	u16 pid;
+	u16 status = 0;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		status = 1 << USB_DEVICE_SELF_POWERED;
+		break;
+	case USB_RECIP_INTERFACE:
+		status = 0;
+		break;
+	case USB_RECIP_ENDPOINT:
+		ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+		pid = control_reg_get_pid(m66592, ep->pipenum);
+		if (pid == M66592_PID_STALL)
+			status = 1 << USB_ENDPOINT_HALT;
+		else
+			status = 0;
+		break;
+	default:
+		pipe_stall(m66592, 0);
+		return;		/* exit */
+	}
+
+	m66592->ep0_data = cpu_to_le16(status);
+	m66592->ep0_req->buf = &m66592->ep0_data;
+	m66592->ep0_req->length = 2;
+	/* AV: what happens if we get called again before that gets through? */
+	spin_unlock(&m66592->lock);
+	m66592_queue(m66592->gadget.ep0, m66592->ep0_req, GFP_KERNEL);
+	spin_lock(&m66592->lock);
+}
+
+static void clear_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
+{
+	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		control_end(m66592, 1);
+		break;
+	case USB_RECIP_INTERFACE:
+		control_end(m66592, 1);
+		break;
+	case USB_RECIP_ENDPOINT: {
+		struct m66592_ep *ep;
+		struct m66592_request *req;
+		u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+		ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+		pipe_stop(m66592, ep->pipenum);
+		control_reg_sqclr(m66592, ep->pipenum);
+
+		control_end(m66592, 1);
+
+		req = list_entry(ep->queue.next,
+				 struct m66592_request, queue);
+		if (ep->busy) {
+			ep->busy = 0;
+			if (list_empty(&ep->queue))
+				break;
+			start_packet(ep, req);
+		} else if (!list_empty(&ep->queue))
+			pipe_start(m66592, ep->pipenum);
+		}
+		break;
+	default:
+		pipe_stall(m66592, 0);
+		break;
+	}
+}
+
+static void set_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
+{
+	u16 tmp;
+	int timeout = 3000;
+
+	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		switch (le16_to_cpu(ctrl->wValue)) {
+		case USB_DEVICE_TEST_MODE:
+			control_end(m66592, 1);
+			/* Wait for the completion of status stage */
+			do {
+				tmp = m66592_read(m66592, M66592_INTSTS0) &
+								M66592_CTSQ;
+				udelay(1);
+			} while (tmp != M66592_CS_IDST && timeout-- > 0);
+
+			if (tmp == M66592_CS_IDST)
+				m66592_bset(m66592,
+					    le16_to_cpu(ctrl->wIndex) >> 8,
+					    M66592_TESTMODE);
+			break;
+		default:
+			pipe_stall(m66592, 0);
+			break;
+		}
+		break;
+	case USB_RECIP_INTERFACE:
+		control_end(m66592, 1);
+		break;
+	case USB_RECIP_ENDPOINT: {
+		struct m66592_ep *ep;
+		u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+		ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+		pipe_stall(m66592, ep->pipenum);
+
+		control_end(m66592, 1);
+		}
+		break;
+	default:
+		pipe_stall(m66592, 0);
+		break;
+	}
+}
+
+/* if the return value is nonzero, call the class driver's setup() */
+static int setup_packet(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
+{
+	u16 *p = (u16 *)ctrl;
+	unsigned long offset = M66592_USBREQ;
+	int i, ret = 0;
+
+	/* read fifo */
+	m66592_write(m66592, ~M66592_VALID, M66592_INTSTS0);
+
+	for (i = 0; i < 4; i++)
+		p[i] = m66592_read(m66592, offset + i*2);
+
+	/* check request */
+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+		switch (ctrl->bRequest) {
+		case USB_REQ_GET_STATUS:
+			get_status(m66592, ctrl);
+			break;
+		case USB_REQ_CLEAR_FEATURE:
+			clear_feature(m66592, ctrl);
+			break;
+		case USB_REQ_SET_FEATURE:
+			set_feature(m66592, ctrl);
+			break;
+		default:
+			ret = 1;
+			break;
+		}
+	} else
+		ret = 1;
+	return ret;
+}
+
+static void m66592_update_usb_speed(struct m66592 *m66592)
+{
+	u16 speed = get_usb_speed(m66592);
+
+	switch (speed) {
+	case M66592_HSMODE:
+		m66592->gadget.speed = USB_SPEED_HIGH;
+		break;
+	case M66592_FSMODE:
+		m66592->gadget.speed = USB_SPEED_FULL;
+		break;
+	default:
+		m66592->gadget.speed = USB_SPEED_UNKNOWN;
+		pr_err("USB speed unknown\n");
+	}
+}
+
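+/*
+ * DVSQ transition handler: a return to the Default state is treated as a
+ * bus reset (the gadget driver is disconnected), and the negotiated speed
+ * is re-read on leaving the Configured state or on reaching the Address or
+ * Configured state while the speed is still unknown.
+ */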
+static void irq_device_state(struct m66592 *m66592)
+{
+	u16 dvsq;
+
+	dvsq = m66592_read(m66592, M66592_INTSTS0) & M66592_DVSQ;
+	m66592_write(m66592, ~M66592_DVST, M66592_INTSTS0);
+
+	if (dvsq == M66592_DS_DFLT) {	/* bus reset */
+		m66592->driver->disconnect(&m66592->gadget);
+		m66592_update_usb_speed(m66592);
+	}
+	if (m66592->old_dvsq == M66592_DS_CNFG && dvsq != M66592_DS_CNFG)
+		m66592_update_usb_speed(m66592);
+	if ((dvsq == M66592_DS_CNFG || dvsq == M66592_DS_ADDS)
+			&& m66592->gadget.speed == USB_SPEED_UNKNOWN)
+		m66592_update_usb_speed(m66592);
+
+	m66592->old_dvsq = dvsq;
+}
+
+static void irq_control_stage(struct m66592 *m66592)
+__releases(m66592->lock)
+__acquires(m66592->lock)
+{
+	struct usb_ctrlrequest ctrl;
+	u16 ctsq;
+
+	ctsq = m66592_read(m66592, M66592_INTSTS0) & M66592_CTSQ;
+	m66592_write(m66592, ~M66592_CTRT, M66592_INTSTS0);
+
+	switch (ctsq) {
+	case M66592_CS_IDST: {
+		struct m66592_ep *ep;
+		struct m66592_request *req;
+		ep = &m66592->ep[0];
+		req = list_entry(ep->queue.next, struct m66592_request, queue);
+		transfer_complete(ep, req, 0);
+		}
+		break;
+
+	case M66592_CS_RDDS:
+	case M66592_CS_WRDS:
+	case M66592_CS_WRND:
+		if (setup_packet(m66592, &ctrl)) {
+			spin_unlock(&m66592->lock);
+			if (m66592->driver->setup(&m66592->gadget, &ctrl) < 0)
+				pipe_stall(m66592, 0);
+			spin_lock(&m66592->lock);
+		}
+		break;
+	case M66592_CS_RDSS:
+	case M66592_CS_WRSS:
+		control_end(m66592, 0);
+		break;
+	default:
+		pr_err("ctrl_stage: unexpected ctsq(%x)\n", ctsq);
+		break;
+	}
+}
+
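+/*
+ * Top-level interrupt handler.  CFIFOSEL is saved and restored around the
+ * sub-handlers because they retarget the common FIFO port.  A VBUS change
+ * is not acted on directly; it only (re)arms the sampling timer below.
+ */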
+static irqreturn_t m66592_irq(int irq, void *_m66592)
+{
+	struct m66592 *m66592 = _m66592;
+	u16 intsts0;
+	u16 intenb0;
+	u16 brdysts, nrdysts, bempsts;
+	u16 brdyenb, nrdyenb, bempenb;
+	u16 savepipe;
+	u16 mask0;
+
+	spin_lock(&m66592->lock);
+
+	intsts0 = m66592_read(m66592, M66592_INTSTS0);
+	intenb0 = m66592_read(m66592, M66592_INTENB0);
+
+	if (m66592->pdata->on_chip && !intsts0 && !intenb0) {
+		/*
+		 * While the USB clock is stopped, the registers cannot be
+		 * read, but interrupts may still occur.  So turn the clock
+		 * back on here and re-read the registers.
+		 */
+		m66592_start_xclock(m66592);
+		intsts0 = m66592_read(m66592, M66592_INTSTS0);
+		intenb0 = m66592_read(m66592, M66592_INTENB0);
+	}
+
+	savepipe = m66592_read(m66592, M66592_CFIFOSEL);
+
+	mask0 = intsts0 & intenb0;
+	if (mask0) {
+		brdysts = m66592_read(m66592, M66592_BRDYSTS);
+		nrdysts = m66592_read(m66592, M66592_NRDYSTS);
+		bempsts = m66592_read(m66592, M66592_BEMPSTS);
+		brdyenb = m66592_read(m66592, M66592_BRDYENB);
+		nrdyenb = m66592_read(m66592, M66592_NRDYENB);
+		bempenb = m66592_read(m66592, M66592_BEMPENB);
+
+		if (mask0 & M66592_VBINT) {
+			m66592_write(m66592,  0xffff & ~M66592_VBINT,
+					M66592_INTSTS0);
+			m66592_start_xclock(m66592);
+
+			/* start vbus sampling */
+			m66592->old_vbus = m66592_read(m66592, M66592_INTSTS0)
+					& M66592_VBSTS;
+			m66592->scount = M66592_MAX_SAMPLING;
+
+			mod_timer(&m66592->timer,
+					jiffies + msecs_to_jiffies(50));
+		}
+		if (intsts0 & M66592_DVSQ)
+			irq_device_state(m66592);
+
+		if ((intsts0 & M66592_BRDY) && (intenb0 & M66592_BRDYE)
+				&& (brdysts & brdyenb)) {
+			irq_pipe_ready(m66592, brdysts, brdyenb);
+		}
+		if ((intsts0 & M66592_BEMP) && (intenb0 & M66592_BEMPE)
+				&& (bempsts & bempenb)) {
+			irq_pipe_empty(m66592, bempsts, bempenb);
+		}
+
+		if (intsts0 & M66592_CTRT)
+			irq_control_stage(m66592);
+	}
+
+	m66592_write(m66592, savepipe, M66592_CFIFOSEL);
+
+	spin_unlock(&m66592->lock);
+	return IRQ_HANDLED;
+}
+
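+/*
+ * VBUS debounce timer: sample VBSTS every 50ms until the same value has
+ * been seen M66592_MAX_SAMPLING times in a row, then report connect or
+ * disconnect.  Any change in the sampled value restarts the run.
+ */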
+static void m66592_timer(unsigned long _m66592)
+{
+	struct m66592 *m66592 = (struct m66592 *)_m66592;
+	unsigned long flags;
+	u16 tmp;
+
+	spin_lock_irqsave(&m66592->lock, flags);
+	tmp = m66592_read(m66592, M66592_SYSCFG);
+	if (!(tmp & M66592_RCKE)) {
+		m66592_bset(m66592, M66592_RCKE | M66592_PLLC, M66592_SYSCFG);
+		udelay(10);
+		m66592_bset(m66592, M66592_SCKE, M66592_SYSCFG);
+	}
+	if (m66592->scount > 0) {
+		tmp = m66592_read(m66592, M66592_INTSTS0) & M66592_VBSTS;
+		if (tmp == m66592->old_vbus) {
+			m66592->scount--;
+			if (m66592->scount == 0) {
+				if (tmp == M66592_VBSTS)
+					m66592_usb_connect(m66592);
+				else
+					m66592_usb_disconnect(m66592);
+			} else {
+				mod_timer(&m66592->timer,
+					jiffies + msecs_to_jiffies(50));
+			}
+		} else {
+			m66592->scount = M66592_MAX_SAMPLING;
+			m66592->old_vbus = tmp;
+			mod_timer(&m66592->timer,
+					jiffies + msecs_to_jiffies(50));
+		}
+	}
+	spin_unlock_irqrestore(&m66592->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+static int m66592_enable(struct usb_ep *_ep,
+			 const struct usb_endpoint_descriptor *desc)
+{
+	struct m66592_ep *ep;
+
+	ep = container_of(_ep, struct m66592_ep, ep);
+	return alloc_pipe_config(ep, desc);
+}
+
+static int m66592_disable(struct usb_ep *_ep)
+{
+	struct m66592_ep *ep;
+	struct m66592_request *req;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct m66592_ep, ep);
+	BUG_ON(!ep);
+
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct m66592_request, queue);
+		spin_lock_irqsave(&ep->m66592->lock, flags);
+		transfer_complete(ep, req, -ECONNRESET);
+		spin_unlock_irqrestore(&ep->m66592->lock, flags);
+	}
+
+	pipe_irq_disable(ep->m66592, ep->pipenum);
+	return free_pipe_config(ep);
+}
+
+static struct usb_request *m66592_alloc_request(struct usb_ep *_ep,
+						gfp_t gfp_flags)
+{
+	struct m66592_request *req;
+
+	req = kzalloc(sizeof(struct m66592_request), gfp_flags);
+	if (!req)
+		return NULL;
+
+	INIT_LIST_HEAD(&req->queue);
+
+	return &req->req;
+}
+
+static void m66592_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct m66592_request *req;
+
+	req = container_of(_req, struct m66592_request, req);
+	kfree(req);
+}
+
+static int m66592_queue(struct usb_ep *_ep, struct usb_request *_req,
+			gfp_t gfp_flags)
+{
+	struct m66592_ep *ep;
+	struct m66592_request *req;
+	unsigned long flags;
+	int request = 0;
+
+	ep = container_of(_ep, struct m66592_ep, ep);
+	req = container_of(_req, struct m66592_request, req);
+
+	if (ep->m66592->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	spin_lock_irqsave(&ep->m66592->lock, flags);
+
+	if (list_empty(&ep->queue))
+		request = 1;
+
+	list_add_tail(&req->queue, &ep->queue);
+	req->req.actual = 0;
+	req->req.status = -EINPROGRESS;
+
+	if (ep->desc == NULL)	/* control */
+		start_ep0(ep, req);
+	else {
+		if (request && !ep->busy)
+			start_packet(ep, req);
+	}
+
+	spin_unlock_irqrestore(&ep->m66592->lock, flags);
+
+	return 0;
+}
+
+static int m66592_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct m66592_ep *ep;
+	struct m66592_request *req;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct m66592_ep, ep);
+	req = container_of(_req, struct m66592_request, req);
+
+	spin_lock_irqsave(&ep->m66592->lock, flags);
+	if (!list_empty(&ep->queue))
+		transfer_complete(ep, req, -ECONNRESET);
+	spin_unlock_irqrestore(&ep->m66592->lock, flags);
+
+	return 0;
+}
+
+static int m66592_set_halt(struct usb_ep *_ep, int value)
+{
+	struct m66592_ep *ep;
+	struct m66592_request *req;
+	unsigned long flags;
+	int ret = 0;
+
+	ep = container_of(_ep, struct m66592_ep, ep);
+	req = list_entry(ep->queue.next, struct m66592_request, queue);
+
+	spin_lock_irqsave(&ep->m66592->lock, flags);
+	if (!list_empty(&ep->queue)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	if (value) {
+		ep->busy = 1;
+		pipe_stall(ep->m66592, ep->pipenum);
+	} else {
+		ep->busy = 0;
+		pipe_stop(ep->m66592, ep->pipenum);
+	}
+
+out:
+	spin_unlock_irqrestore(&ep->m66592->lock, flags);
+	return ret;
+}
+
+static void m66592_fifo_flush(struct usb_ep *_ep)
+{
+	struct m66592_ep *ep;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct m66592_ep, ep);
+	spin_lock_irqsave(&ep->m66592->lock, flags);
+	if (list_empty(&ep->queue) && !ep->busy) {
+		pipe_stop(ep->m66592, ep->pipenum);
+		m66592_bclr(ep->m66592, M66592_BCLR, ep->fifoctr);
+	}
+	spin_unlock_irqrestore(&ep->m66592->lock, flags);
+}
+
+static struct usb_ep_ops m66592_ep_ops = {
+	.enable		= m66592_enable,
+	.disable	= m66592_disable,
+
+	.alloc_request	= m66592_alloc_request,
+	.free_request	= m66592_free_request,
+
+	.queue		= m66592_queue,
+	.dequeue	= m66592_dequeue,
+
+	.set_halt	= m66592_set_halt,
+	.fifo_flush	= m66592_fifo_flush,
+};
+
+/*-------------------------------------------------------------------------*/
+static struct m66592 *the_controller;
+
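+/*
+ * UDC start/stop callbacks.  Only one controller instance is supported
+ * ("the_controller").  m66592_start() binds the gadget driver, enables
+ * the VBUS and USB-reset interrupts and, if VBUS is already present,
+ * arms the sampling timer so an existing connection is detected.
+ */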
+static int m66592_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct m66592 *m66592 = the_controller;
+	int retval;
+
+	if (!driver
+			|| driver->max_speed < USB_SPEED_HIGH
+			|| !bind
+			|| !driver->setup)
+		return -EINVAL;
+	if (!m66592)
+		return -ENODEV;
+	if (m66592->driver)
+		return -EBUSY;
+
+	/* hook up the driver */
+	driver->driver.bus = NULL;
+	m66592->driver = driver;
+	m66592->gadget.dev.driver = &driver->driver;
+
+	retval = device_add(&m66592->gadget.dev);
+	if (retval) {
+		pr_err("device_add error (%d)\n", retval);
+		goto error;
+	}
+
+	retval = bind(&m66592->gadget);
+	if (retval) {
+		pr_err("bind to driver error (%d)\n", retval);
+		device_del(&m66592->gadget.dev);
+		goto error;
+	}
+
+	m66592_bset(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);
+	if (m66592_read(m66592, M66592_INTSTS0) & M66592_VBSTS) {
+		m66592_start_xclock(m66592);
+		/* start vbus sampling */
+		m66592->old_vbus = m66592_read(m66592,
+					 M66592_INTSTS0) & M66592_VBSTS;
+		m66592->scount = M66592_MAX_SAMPLING;
+		mod_timer(&m66592->timer, jiffies + msecs_to_jiffies(50));
+	}
+
+	return 0;
+
+error:
+	m66592->driver = NULL;
+	m66592->gadget.dev.driver = NULL;
+
+	return retval;
+}
+
+static int m66592_stop(struct usb_gadget_driver *driver)
+{
+	struct m66592 *m66592 = the_controller;
+	unsigned long flags;
+
+	if (driver != m66592->driver || !driver->unbind)
+		return -EINVAL;
+
+	spin_lock_irqsave(&m66592->lock, flags);
+	if (m66592->gadget.speed != USB_SPEED_UNKNOWN)
+		m66592_usb_disconnect(m66592);
+	spin_unlock_irqrestore(&m66592->lock, flags);
+
+	m66592_bclr(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);
+
+	driver->unbind(&m66592->gadget);
+	m66592->gadget.dev.driver = NULL;
+
+	init_controller(m66592);
+	disable_controller(m66592);
+
+	device_del(&m66592->gadget.dev);
+	m66592->driver = NULL;
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+static int m66592_get_frame(struct usb_gadget *_gadget)
+{
+	struct m66592 *m66592 = gadget_to_m66592(_gadget);
+	return m66592_read(m66592, M66592_FRMNUM) & 0x03FF;
+}
+
+static int m66592_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct m66592 *m66592 = gadget_to_m66592(gadget);
+	unsigned long flags;
+
+	spin_lock_irqsave(&m66592->lock, flags);
+	if (is_on)
+		m66592_bset(m66592, M66592_DPRPU, M66592_SYSCFG);
+	else
+		m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
+	spin_unlock_irqrestore(&m66592->lock, flags);
+
+	return 0;
+}
+
+static struct usb_gadget_ops m66592_gadget_ops = {
+	.get_frame		= m66592_get_frame,
+	.start			= m66592_start,
+	.stop			= m66592_stop,
+	.pullup			= m66592_pullup,
+};
+
+static int __exit m66592_remove(struct platform_device *pdev)
+{
+	struct m66592		*m66592 = dev_get_drvdata(&pdev->dev);
+
+	usb_del_gadget_udc(&m66592->gadget);
+
+	del_timer_sync(&m66592->timer);
+	iounmap(m66592->reg);
+	free_irq(platform_get_irq(pdev, 0), m66592);
+	m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
+#ifdef CONFIG_HAVE_CLK
+	if (m66592->pdata->on_chip) {
+		clk_disable(m66592->clk);
+		clk_put(m66592->clk);
+	}
+#endif
+	kfree(m66592);
+	return 0;
+}
+
+static void nop_completion(struct usb_ep *ep, struct usb_request *r)
+{
+}
+
+static int __init m66592_probe(struct platform_device *pdev)
+{
+	struct resource *res, *ires;
+	void __iomem *reg = NULL;
+	struct m66592 *m66592 = NULL;
+#ifdef CONFIG_HAVE_CLK
+	char clk_name[8];
+#endif
+	int ret = 0;
+	int i;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENODEV;
+		pr_err("platform_get_resource error.\n");
+		goto clean_up;
+	}
+
+	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!ires) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev,
+			"platform_get_resource IORESOURCE_IRQ error.\n");
+		goto clean_up;
+	}
+
+	reg = ioremap(res->start, resource_size(res));
+	if (reg == NULL) {
+		ret = -ENOMEM;
+		pr_err("ioremap error.\n");
+		goto clean_up;
+	}
+
+	if (pdev->dev.platform_data == NULL) {
+		dev_err(&pdev->dev, "no platform data\n");
+		ret = -ENODEV;
+		goto clean_up;
+	}
+
+	/* initialize udc */
+	m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL);
+	if (m66592 == NULL) {
+		ret = -ENOMEM;
+		pr_err("kzalloc error\n");
+		goto clean_up;
+	}
+
+	m66592->pdata = pdev->dev.platform_data;
+	m66592->irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
+
+	spin_lock_init(&m66592->lock);
+	dev_set_drvdata(&pdev->dev, m66592);
+
+	m66592->gadget.ops = &m66592_gadget_ops;
+	device_initialize(&m66592->gadget.dev);
+	dev_set_name(&m66592->gadget.dev, "gadget");
+	m66592->gadget.max_speed = USB_SPEED_HIGH;
+	m66592->gadget.dev.parent = &pdev->dev;
+	m66592->gadget.dev.dma_mask = pdev->dev.dma_mask;
+	m66592->gadget.dev.release = pdev->dev.release;
+	m66592->gadget.name = udc_name;
+
+	init_timer(&m66592->timer);
+	m66592->timer.function = m66592_timer;
+	m66592->timer.data = (unsigned long)m66592;
+	m66592->reg = reg;
+
+	ret = request_irq(ires->start, m66592_irq, IRQF_SHARED,
+			udc_name, m66592);
+	if (ret < 0) {
+		pr_err("request_irq error (%d)\n", ret);
+		goto clean_up;
+	}
+
+#ifdef CONFIG_HAVE_CLK
+	if (m66592->pdata->on_chip) {
+		snprintf(clk_name, sizeof(clk_name), "usbf%d", pdev->id);
+		m66592->clk = clk_get(&pdev->dev, clk_name);
+		if (IS_ERR(m66592->clk)) {
+			dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
+				clk_name);
+			ret = PTR_ERR(m66592->clk);
+			goto clean_up2;
+		}
+		clk_enable(m66592->clk);
+	}
+#endif
+	INIT_LIST_HEAD(&m66592->gadget.ep_list);
+	m66592->gadget.ep0 = &m66592->ep[0].ep;
+	INIT_LIST_HEAD(&m66592->gadget.ep0->ep_list);
+	for (i = 0; i < M66592_MAX_NUM_PIPE; i++) {
+		struct m66592_ep *ep = &m66592->ep[i];
+
+		if (i != 0) {
+			INIT_LIST_HEAD(&m66592->ep[i].ep.ep_list);
+			list_add_tail(&m66592->ep[i].ep.ep_list,
+					&m66592->gadget.ep_list);
+		}
+		ep->m66592 = m66592;
+		INIT_LIST_HEAD(&ep->queue);
+		ep->ep.name = m66592_ep_name[i];
+		ep->ep.ops = &m66592_ep_ops;
+		ep->ep.maxpacket = 512;
+	}
+	m66592->ep[0].ep.maxpacket = 64;
+	m66592->ep[0].pipenum = 0;
+	m66592->ep[0].fifoaddr = M66592_CFIFO;
+	m66592->ep[0].fifosel = M66592_CFIFOSEL;
+	m66592->ep[0].fifoctr = M66592_CFIFOCTR;
+	m66592->ep[0].fifotrn = 0;
+	m66592->ep[0].pipectr = get_pipectr_addr(0);
+	m66592->pipenum2ep[0] = &m66592->ep[0];
+	m66592->epaddr2ep[0] = &m66592->ep[0];
+
+	the_controller = m66592;
+
+	m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL);
+	if (m66592->ep0_req == NULL)
+		goto clean_up3;
+	m66592->ep0_req->complete = nop_completion;
+
+	init_controller(m66592);
+
+	ret = usb_add_gadget_udc(&pdev->dev, &m66592->gadget);
+	if (ret)
+		goto err_add_udc;
+
+	dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
+	return 0;
+
+err_add_udc:
+	m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
+
+clean_up3:
+#ifdef CONFIG_HAVE_CLK
+	if (m66592->pdata->on_chip) {
+		clk_disable(m66592->clk);
+		clk_put(m66592->clk);
+	}
+clean_up2:
+#endif
+	free_irq(ires->start, m66592);
+clean_up:
+	if (m66592) {
+		if (m66592->ep0_req)
+			m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
+		kfree(m66592);
+	}
+	if (reg)
+		iounmap(reg);
+
+	return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+static struct platform_driver m66592_driver = {
+	.remove =	__exit_p(m66592_remove),
+	.driver		= {
+		.name =	(char *) udc_name,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init m66592_udc_init(void)
+{
+	return platform_driver_probe(&m66592_driver, m66592_probe);
+}
+module_init(m66592_udc_init);
+
+static void __exit m66592_udc_cleanup(void)
+{
+	platform_driver_unregister(&m66592_driver);
+}
+module_exit(m66592_udc_cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/m66592-udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/m66592-udc.h
new file mode 100644
index 0000000..9d9f7e3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/m66592-udc.h
@@ -0,0 +1,610 @@
+/*
+ * M66592 UDC (USB gadget)
+ *
+ * Copyright (C) 2006-2007 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef __M66592_UDC_H__
+#define __M66592_UDC_H__
+
+#ifdef CONFIG_HAVE_CLK
+#include <linux/clk.h>
+#endif
+
+#include <linux/usb/m66592.h>
+
+#define M66592_SYSCFG		0x00
+#define M66592_XTAL		0xC000	/* b15-14: Crystal selection */
+#define   M66592_XTAL48		 0x8000		/* 48MHz */
+#define   M66592_XTAL24		 0x4000		/* 24MHz */
+#define   M66592_XTAL12		 0x0000		/* 12MHz */
+#define M66592_XCKE		0x2000	/* b13: External clock enable */
+#define M66592_RCKE		0x1000	/* b12: Register clock enable */
+#define M66592_PLLC		0x0800	/* b11: PLL control */
+#define M66592_SCKE		0x0400	/* b10: USB clock enable */
+#define M66592_ATCKM		0x0100	/* b8: Automatic clock supply */
+#define M66592_HSE		0x0080	/* b7: Hi-speed enable */
+#define M66592_DCFM		0x0040	/* b6: Controller function select  */
+#define M66592_DMRPD		0x0020	/* b5: D- pull down control */
+#define M66592_DPRPU		0x0010	/* b4: D+ pull up control */
+#define M66592_FSRPC		0x0004	/* b2: Full-speed receiver enable */
+#define M66592_PCUT		0x0002	/* b1: Low power sleep enable */
+#define M66592_USBE		0x0001	/* b0: USB module operation enable */
+
+#define M66592_SYSSTS		0x02
+#define M66592_LNST		0x0003	/* b1-0: D+, D- line status */
+#define   M66592_SE1		 0x0003		/* SE1 */
+#define   M66592_KSTS		 0x0002		/* K State */
+#define   M66592_JSTS		 0x0001		/* J State */
+#define   M66592_SE0		 0x0000		/* SE0 */
+
+#define M66592_DVSTCTR		0x04
+#define M66592_WKUP		0x0100	/* b8: Remote wakeup */
+#define M66592_RWUPE		0x0080	/* b7: Remote wakeup sense */
+#define M66592_USBRST		0x0040	/* b6: USB reset enable */
+#define M66592_RESUME		0x0020	/* b5: Resume enable */
+#define M66592_UACT		0x0010	/* b4: USB bus enable */
+#define M66592_RHST		0x0003	/* b1-0: Reset handshake status */
+#define   M66592_HSMODE		 0x0003		/* Hi-Speed mode */
+#define   M66592_FSMODE		 0x0002		/* Full-Speed mode */
+#define   M66592_HSPROC		 0x0001		/* HS handshake in progress */
+
+#define M66592_TESTMODE		0x06
+#define M66592_UTST		0x000F	/* b4-0: Test select */
+#define   M66592_H_TST_PACKET	 0x000C		/* HOST TEST Packet */
+#define   M66592_H_TST_SE0_NAK	 0x000B		/* HOST TEST SE0 NAK */
+#define   M66592_H_TST_K	 0x000A		/* HOST TEST K */
+#define   M66592_H_TST_J	 0x0009		/* HOST TEST J */
+#define   M66592_H_TST_NORMAL	 0x0000		/* HOST Normal Mode */
+#define   M66592_P_TST_PACKET	 0x0004		/* PERI TEST Packet */
+#define   M66592_P_TST_SE0_NAK	 0x0003		/* PERI TEST SE0 NAK */
+#define   M66592_P_TST_K	 0x0002		/* PERI TEST K */
+#define   M66592_P_TST_J	 0x0001		/* PERI TEST J */
+#define   M66592_P_TST_NORMAL	 0x0000		/* PERI Normal Mode */
+
+/* built-in registers */
+#define M66592_CFBCFG		0x0A
+#define M66592_D0FBCFG		0x0C
+#define M66592_LITTLE		0x0100	/* b8: Little endian mode */
+/* external chip case */
+#define M66592_PINCFG		0x0A
+#define M66592_LDRV		0x8000	/* b15: Drive Current Adjust */
+#define M66592_BIGEND		0x0100	/* b8: Big endian mode */
+
+#define M66592_DMA0CFG		0x0C
+#define M66592_DMA1CFG		0x0E
+#define M66592_DREQA		0x4000	/* b14: Dreq active select */
+#define M66592_BURST		0x2000	/* b13: Burst mode */
+#define M66592_DACKA		0x0400	/* b10: Dack active select */
+#define M66592_DFORM		0x0380	/* b9-7: DMA mode select */
+#define   M66592_CPU_ADR_RD_WR	 0x0000   /* Address + RD/WR mode (CPU bus) */
+#define   M66592_CPU_DACK_RD_WR	 0x0100   /* DACK + RD/WR mode (CPU bus) */
+#define   M66592_CPU_DACK_ONLY	 0x0180   /* DACK only mode (CPU bus) */
+#define   M66592_SPLIT_DACK_ONLY 0x0200   /* DACK only mode (SPLIT bus) */
+#define   M66592_SPLIT_DACK_DSTB 0x0300   /* DACK + DSTB0 mode (SPLIT bus) */
+#define M66592_DENDA		0x0040	/* b6: Dend active select */
+#define M66592_PKTM		0x0020	/* b5: Packet mode */
+#define M66592_DENDE		0x0010	/* b4: Dend enable */
+#define M66592_OBUS		0x0004	/* b2: OUTbus mode */
+
+/* common case */
+#define M66592_CFIFO		0x10
+#define M66592_D0FIFO		0x14
+#define M66592_D1FIFO		0x18
+
+#define M66592_CFIFOSEL		0x1E
+#define M66592_D0FIFOSEL	0x24
+#define M66592_D1FIFOSEL	0x2A
+#define M66592_RCNT		0x8000	/* b15: Read count mode */
+#define M66592_REW		0x4000	/* b14: Buffer rewind */
+#define M66592_DCLRM		0x2000	/* b13: DMA buffer clear mode */
+#define M66592_DREQE		0x1000	/* b12: DREQ output enable */
+#define M66592_MBW_8		0x0000   /*  8bit */
+#define M66592_MBW_16		0x0400   /* 16bit */
+#define M66592_MBW_32		0x0800   /* 32bit */
+#define M66592_TRENB		0x0200	/* b9: Transaction counter enable */
+#define M66592_TRCLR		0x0100	/* b8: Transaction counter clear */
+#define M66592_DEZPM		0x0080	/* b7: Zero-length packet mode */
+#define M66592_ISEL		0x0020	/* b5: DCP FIFO port direction select */
+#define M66592_CURPIPE		0x0007	/* b2-0: PIPE select */
+
+#define M66592_CFIFOCTR		0x20
+#define M66592_D0FIFOCTR	0x26
+#define M66592_D1FIFOCTR	0x2c
+#define M66592_BVAL		0x8000	/* b15: Buffer valid flag */
+#define M66592_BCLR		0x4000	/* b14: Buffer clear */
+#define M66592_FRDY		0x2000	/* b13: FIFO ready */
+#define M66592_DTLN		0x0FFF	/* b11-0: FIFO received data length */
+
+#define M66592_CFIFOSIE		0x22
+#define M66592_TGL		0x8000	/* b15: Buffer toggle */
+#define M66592_SCLR		0x4000	/* b14: Buffer clear */
+#define M66592_SBUSY		0x2000	/* b13: SIE_FIFO busy */
+
+#define M66592_D0FIFOTRN	0x28
+#define M66592_D1FIFOTRN	0x2E
+#define M66592_TRNCNT		0xFFFF	/* b15-0: Transaction counter */
+
+#define M66592_INTENB0	0x30
+#define M66592_VBSE	0x8000	/* b15: VBUS interrupt */
+#define M66592_RSME	0x4000	/* b14: Resume interrupt */
+#define M66592_SOFE	0x2000	/* b13: Frame update interrupt */
+#define M66592_DVSE	0x1000	/* b12: Device state transition interrupt */
+#define M66592_CTRE	0x0800	/* b11: Control transfer stage transition irq */
+#define M66592_BEMPE	0x0400	/* b10: Buffer empty interrupt */
+#define M66592_NRDYE	0x0200	/* b9: Buffer not ready interrupt */
+#define M66592_BRDYE	0x0100	/* b8: Buffer ready interrupt */
+#define M66592_URST	0x0080	/* b7: USB reset detected interrupt */
+#define M66592_SADR	0x0040	/* b6: Set address executed interrupt */
+#define M66592_SCFG	0x0020	/* b5: Set configuration executed interrupt */
+#define M66592_SUSP	0x0010	/* b4: Suspend detected interrupt */
+#define M66592_WDST	0x0008	/* b3: Control write data stage completed irq */
+#define M66592_RDST	0x0004	/* b2: Control read data stage completed irq */
+#define M66592_CMPL	0x0002	/* b1: Control transfer complete interrupt */
+#define M66592_SERR	0x0001	/* b0: Sequence error interrupt */
+
+#define M66592_INTENB1	0x32
+#define M66592_BCHGE	0x4000	/* b14: USB bus change interrupt */
+#define M66592_DTCHE	0x1000	/* b12: Detach sense interrupt */
+#define M66592_SIGNE	0x0020	/* b5: SETUP IGNORE interrupt */
+#define M66592_SACKE	0x0010	/* b4: SETUP ACK interrupt */
+#define M66592_BRDYM	0x0004	/* b2: BRDY clear timing */
+#define M66592_INTL	0x0002	/* b1: Interrupt sense select */
+#define M66592_PCSE	0x0001	/* b0: PCUT enable by CS assert */
+
+#define M66592_BRDYENB		0x36
+#define M66592_BRDYSTS		0x46
+#define M66592_BRDY7		0x0080	/* b7: PIPE7 */
+#define M66592_BRDY6		0x0040	/* b6: PIPE6 */
+#define M66592_BRDY5		0x0020	/* b5: PIPE5 */
+#define M66592_BRDY4		0x0010	/* b4: PIPE4 */
+#define M66592_BRDY3		0x0008	/* b3: PIPE3 */
+#define M66592_BRDY2		0x0004	/* b2: PIPE2 */
+#define M66592_BRDY1		0x0002	/* b1: PIPE1 */
+#define M66592_BRDY0		0x0001	/* b0: PIPE0 */
+
+#define M66592_NRDYENB		0x38
+#define M66592_NRDYSTS		0x48
+#define M66592_NRDY7		0x0080	/* b7: PIPE7 */
+#define M66592_NRDY6		0x0040	/* b6: PIPE6 */
+#define M66592_NRDY5		0x0020	/* b5: PIPE5 */
+#define M66592_NRDY4		0x0010	/* b4: PIPE4 */
+#define M66592_NRDY3		0x0008	/* b3: PIPE3 */
+#define M66592_NRDY2		0x0004	/* b2: PIPE2 */
+#define M66592_NRDY1		0x0002	/* b1: PIPE1 */
+#define M66592_NRDY0		0x0001	/* b0: PIPE0 */
+
+#define M66592_BEMPENB		0x3A
+#define M66592_BEMPSTS		0x4A
+#define M66592_BEMP7		0x0080	/* b7: PIPE7 */
+#define M66592_BEMP6		0x0040	/* b6: PIPE6 */
+#define M66592_BEMP5		0x0020	/* b5: PIPE5 */
+#define M66592_BEMP4		0x0010	/* b4: PIPE4 */
+#define M66592_BEMP3		0x0008	/* b3: PIPE3 */
+#define M66592_BEMP2		0x0004	/* b2: PIPE2 */
+#define M66592_BEMP1		0x0002	/* b1: PIPE1 */
+#define M66592_BEMP0		0x0001	/* b0: PIPE0 */
+
+#define M66592_SOFCFG		0x3C
+#define M66592_SOFM		0x000C	/* b3-2: SOF pulse mode */
+#define   M66592_SOF_125US	 0x0008   /* SOF OUT 125us uFrame Signal */
+#define   M66592_SOF_1MS	 0x0004   /* SOF OUT 1ms Frame Signal */
+#define   M66592_SOF_DISABLE	 0x0000   /* SOF OUT Disable */
+
+#define M66592_INTSTS0		0x40
+#define M66592_VBINT		0x8000	/* b15: VBUS interrupt */
+#define M66592_RESM		0x4000	/* b14: Resume interrupt */
+#define M66592_SOFR		0x2000	/* b13: SOF frame update interrupt */
+#define M66592_DVST		0x1000	/* b12: Device state transition */
+#define M66592_CTRT		0x0800	/* b11: Control stage transition */
+#define M66592_BEMP		0x0400	/* b10: Buffer empty interrupt */
+#define M66592_NRDY		0x0200	/* b9: Buffer not ready interrupt */
+#define M66592_BRDY		0x0100	/* b8: Buffer ready interrupt */
+#define M66592_VBSTS		0x0080	/* b7: VBUS input port */
+#define M66592_DVSQ		0x0070	/* b6-4: Device state */
+#define   M66592_DS_SPD_CNFG	 0x0070	   /* Suspend Configured */
+#define   M66592_DS_SPD_ADDR	 0x0060	   /* Suspend Address */
+#define   M66592_DS_SPD_DFLT	 0x0050	   /* Suspend Default */
+#define   M66592_DS_SPD_POWR	 0x0040	   /* Suspend Powered */
+#define   M66592_DS_SUSP	 0x0040	   /* Suspend */
+#define   M66592_DS_CNFG	 0x0030	   /* Configured */
+#define   M66592_DS_ADDS	 0x0020	   /* Address */
+#define   M66592_DS_DFLT	 0x0010	   /* Default */
+#define   M66592_DS_POWR	 0x0000	   /* Powered */
+#define M66592_DVSQS		0x0030	/* b5-4: Device state */
+#define M66592_VALID		0x0008	/* b3: Setup packet detected flag */
+#define M66592_CTSQ		0x0007	/* b2-0: Control transfer stage */
+#define   M66592_CS_SQER	 0x0006	  /* Sequence error */
+#define   M66592_CS_WRND	 0x0005	  /* Control write nodata status */
+#define   M66592_CS_WRSS	 0x0004	  /* Control write status stage */
+#define   M66592_CS_WRDS	 0x0003	  /* Control write data stage */
+#define   M66592_CS_RDSS	 0x0002	  /* Control read status stage */
+#define   M66592_CS_RDDS	 0x0001	  /* Control read data stage */
+#define   M66592_CS_IDST	 0x0000	  /* Idle or setup stage */
+
+#define M66592_INTSTS1		0x42
+#define M66592_BCHG		0x4000	/* b14: USB bus change interrupt */
+#define M66592_DTCH		0x1000	/* b12: Detach sense interrupt */
+#define M66592_SIGN		0x0020	/* b5: SETUP IGNORE interrupt */
+#define M66592_SACK		0x0010	/* b4: SETUP ACK interrupt */
+
+#define M66592_FRMNUM		0x4C
+#define M66592_OVRN		0x8000	/* b15: Overrun error */
+#define M66592_CRCE		0x4000	/* b14: Received data error */
+#define M66592_SOFRM		0x0800	/* b11: SOF output mode */
+#define M66592_FRNM		0x07FF	/* b10-0: Frame number */
+
+#define M66592_UFRMNUM		0x4E
+#define M66592_UFRNM		0x0007	/* b2-0: Micro frame number */
+
+#define M66592_RECOVER		0x50
+#define M66592_STSRECOV		0x0700	/* Status recovery */
+#define   M66592_STSR_HI	 0x0400		  /* FULL(0) or HI(1) Speed */
+#define   M66592_STSR_DEFAULT	 0x0100		  /* Default state */
+#define   M66592_STSR_ADDRESS	 0x0200		  /* Address state */
+#define   M66592_STSR_CONFIG	 0x0300		  /* Configured state */
+#define M66592_USBADDR		0x007F	/* b6-0: USB address */
+
+#define M66592_USBREQ			0x54
+#define M66592_bRequest			0xFF00	/* b15-8: bRequest */
+#define   M66592_GET_STATUS		 0x0000
+#define   M66592_CLEAR_FEATURE		 0x0100
+#define   M66592_ReqRESERVED		 0x0200
+#define   M66592_SET_FEATURE		 0x0300
+#define   M66592_ReqRESERVED1		 0x0400
+#define   M66592_SET_ADDRESS		 0x0500
+#define   M66592_GET_DESCRIPTOR		 0x0600
+#define   M66592_SET_DESCRIPTOR		 0x0700
+#define   M66592_GET_CONFIGURATION	 0x0800
+#define   M66592_SET_CONFIGURATION	 0x0900
+#define   M66592_GET_INTERFACE		 0x0A00
+#define   M66592_SET_INTERFACE		 0x0B00
+#define   M66592_SYNCH_FRAME		 0x0C00
+#define M66592_bmRequestType		0x00FF	/* b7-0: bmRequestType */
+#define M66592_bmRequestTypeDir		0x0080	/* b7  : Data direction */
+#define   M66592_HOST_TO_DEVICE		 0x0000
+#define   M66592_DEVICE_TO_HOST		 0x0080
+#define M66592_bmRequestTypeType	0x0060	/* b6-5: Type */
+#define   M66592_STANDARD		 0x0000
+#define   M66592_CLASS			 0x0020
+#define   M66592_VENDOR			 0x0040
+#define M66592_bmRequestTypeRecip	0x001F	/* b4-0: Recipient */
+#define   M66592_DEVICE			 0x0000
+#define   M66592_INTERFACE		 0x0001
+#define   M66592_ENDPOINT		 0x0002
+
+#define M66592_USBVAL				0x56
+#define M66592_wValue				0xFFFF	/* b15-0: wValue */
+/* Standard Feature Selector */
+#define   M66592_ENDPOINT_HALT			0x0000
+#define   M66592_DEVICE_REMOTE_WAKEUP		0x0001
+#define   M66592_TEST_MODE			0x0002
+/* Descriptor Types */
+#define M66592_DT_TYPE				0xFF00
+#define M66592_GET_DT_TYPE(v)			(((v) & M66592_DT_TYPE) >> 8)
+#define   M66592_DT_DEVICE			0x01
+#define   M66592_DT_CONFIGURATION		0x02
+#define   M66592_DT_STRING			0x03
+#define   M66592_DT_INTERFACE			0x04
+#define   M66592_DT_ENDPOINT			0x05
+#define   M66592_DT_DEVICE_QUALIFIER		0x06
+#define   M66592_DT_OTHER_SPEED_CONFIGURATION	0x07
+#define   M66592_DT_INTERFACE_POWER		0x08
+#define M66592_DT_INDEX				0x00FF
+#define M66592_CONF_NUM				0x00FF
+#define M66592_ALT_SET				0x00FF
+
+#define M66592_USBINDEX			0x58
+#define M66592_wIndex			0xFFFF	/* b15-0: wIndex */
+#define M66592_TEST_SELECT		0xFF00	/* b15-b8: Test Mode */
+#define   M66592_TEST_J			 0x0100	  /* Test_J */
+#define   M66592_TEST_K			 0x0200	  /* Test_K */
+#define   M66592_TEST_SE0_NAK		 0x0300	  /* Test_SE0_NAK */
+#define   M66592_TEST_PACKET		 0x0400	  /* Test_Packet */
+#define   M66592_TEST_FORCE_ENABLE	 0x0500	  /* Test_Force_Enable */
+#define   M66592_TEST_STSelectors	 0x0600	  /* Standard test selectors */
+#define   M66592_TEST_Reserved		 0x4000	  /* Reserved */
+#define   M66592_TEST_VSTModes		 0xC000	  /* Vendor-specific tests */
+#define M66592_EP_DIR			0x0080	/* b7: Endpoint Direction */
+#define   M66592_EP_DIR_IN		 0x0080
+#define   M66592_EP_DIR_OUT		 0x0000
+
+#define M66592_USBLENG		0x5A
+#define M66592_wLength		0xFFFF	/* b15-0: wLength */
+
+#define M66592_DCPCFG		0x5C
+#define M66592_CNTMD		0x0100	/* b8: Continuous transfer mode */
+#define M66592_DIR		0x0010	/* b4: Control transfer DIR select */
+
+#define M66592_DCPMAXP		0x5E
+#define M66592_DEVSEL		0xC000	/* b15-14: Device address select */
+#define   M66592_DEVICE_0	 0x0000		  /* Device address 0 */
+#define   M66592_DEVICE_1	 0x4000		  /* Device address 1 */
+#define   M66592_DEVICE_2	 0x8000		  /* Device address 2 */
+#define   M66592_DEVICE_3	 0xC000		  /* Device address 3 */
+#define M66592_MAXP		0x007F	/* b6-0: Maxpacket size of ep0 */
+
+#define M66592_DCPCTR		0x60
+#define M66592_BSTS		0x8000	/* b15: Buffer status */
+#define M66592_SUREQ		0x4000	/* b14: Send USB request  */
+#define M66592_SQCLR		0x0100	/* b8: Sequence toggle bit clear */
+#define M66592_SQSET		0x0080	/* b7: Sequence toggle bit set */
+#define M66592_SQMON		0x0040	/* b6: Sequence toggle bit monitor */
+#define M66592_CCPL		0x0004	/* b2: control transfer complete */
+#define M66592_PID		0x0003	/* b1-0: Response PID */
+#define   M66592_PID_STALL	 0x0002		  /* STALL */
+#define   M66592_PID_BUF	 0x0001		  /* BUF */
+#define   M66592_PID_NAK	 0x0000		  /* NAK */
+
+#define M66592_PIPESEL		0x64
+#define M66592_PIPENM		0x0007	/* b2-0: Pipe select */
+#define   M66592_PIPE0		 0x0000		  /* PIPE 0 */
+#define   M66592_PIPE1		 0x0001		  /* PIPE 1 */
+#define   M66592_PIPE2		 0x0002		  /* PIPE 2 */
+#define   M66592_PIPE3		 0x0003		  /* PIPE 3 */
+#define   M66592_PIPE4		 0x0004		  /* PIPE 4 */
+#define   M66592_PIPE5		 0x0005		  /* PIPE 5 */
+#define   M66592_PIPE6		 0x0006		  /* PIPE 6 */
+#define   M66592_PIPE7		 0x0007		  /* PIPE 7 */
+
+#define M66592_PIPECFG		0x66
+#define M66592_TYP		0xC000	/* b15-14: Transfer type */
+#define   M66592_ISO		 0xC000		  /* Isochronous */
+#define   M66592_INT		 0x8000		  /* Interrupt */
+#define   M66592_BULK		 0x4000		  /* Bulk */
+#define M66592_BFRE		0x0400	/* b10: Buffer ready interrupt mode */
+#define M66592_DBLB		0x0200	/* b9: Double buffer mode select */
+#define M66592_CNTMD		0x0100	/* b8: Continuous transfer mode */
+#define M66592_SHTNAK		0x0080	/* b7: Transfer end NAK */
+#define M66592_DIR		0x0010	/* b4: Transfer direction select */
+#define   M66592_DIR_H_OUT	 0x0010		  /* HOST OUT */
+#define   M66592_DIR_P_IN	 0x0010		  /* PERI IN */
+#define   M66592_DIR_H_IN	 0x0000		  /* HOST IN */
+#define   M66592_DIR_P_OUT	 0x0000		  /* PERI OUT */
+#define M66592_EPNUM		0x000F	/* b3-0: Endpoint number select */
+#define   M66592_EP1		 0x0001
+#define   M66592_EP2		 0x0002
+#define   M66592_EP3		 0x0003
+#define   M66592_EP4		 0x0004
+#define   M66592_EP5		 0x0005
+#define   M66592_EP6		 0x0006
+#define   M66592_EP7		 0x0007
+#define   M66592_EP8		 0x0008
+#define   M66592_EP9		 0x0009
+#define   M66592_EP10		 0x000A
+#define   M66592_EP11		 0x000B
+#define   M66592_EP12		 0x000C
+#define   M66592_EP13		 0x000D
+#define   M66592_EP14		 0x000E
+#define   M66592_EP15		 0x000F
+
+#define M66592_PIPEBUF		0x68
+#define M66592_BUFSIZE		0x7C00	/* b14-10: Pipe buffer size */
+#define M66592_BUF_SIZE(x)	((((x) / 64) - 1) << 10)
+#define M66592_BUFNMB		0x00FF	/* b7-0: Pipe buffer number */
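+
+/*
+ * Worked example (comment added for clarity): M66592_BUF_SIZE() encodes a
+ * pipe buffer size in 64-byte units minus one, placed in b14-10.  For a
+ * 512-byte buffer, M66592_BUF_SIZE(512) == ((512 / 64) - 1) << 10 == 0x1C00.
+ */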
+
+#define M66592_PIPEMAXP		0x6A
+#define M66592_MXPS		0x07FF	/* b10-0: Maxpacket size */
+
+#define M66592_PIPEPERI		0x6C
+#define M66592_IFIS		0x1000	/* b12: ISO in-buffer flush mode */
+#define M66592_IITV		0x0007	/* b2-0: ISO interval */
+
+#define M66592_PIPE1CTR		0x70
+#define M66592_PIPE2CTR		0x72
+#define M66592_PIPE3CTR		0x74
+#define M66592_PIPE4CTR		0x76
+#define M66592_PIPE5CTR		0x78
+#define M66592_PIPE6CTR		0x7A
+#define M66592_PIPE7CTR		0x7C
+#define M66592_BSTS		0x8000	/* b15: Buffer status */
+#define M66592_INBUFM		0x4000	/* b14: IN buffer monitor (PIPE 1-5) */
+#define M66592_ACLRM		0x0200	/* b9: Out buffer auto clear mode */
+#define M66592_SQCLR		0x0100	/* b8: Sequence toggle bit clear */
+#define M66592_SQSET		0x0080	/* b7: Sequence toggle bit set */
+#define M66592_SQMON		0x0040	/* b6: Sequence toggle bit monitor */
+#define M66592_PID		0x0003	/* b1-0: Response PID */
+
+#define M66592_INVALID_REG	0x7E
+
+
+#define get_pipectr_addr(pipenum)	(M66592_PIPE1CTR + (pipenum - 1) * 2)
+
+#define M66592_MAX_SAMPLING	10
+
+#define M66592_MAX_NUM_PIPE	8
+#define M66592_MAX_NUM_BULK	3
+#define M66592_MAX_NUM_ISOC	2
+#define M66592_MAX_NUM_INT	2
+
+#define M66592_BASE_PIPENUM_BULK	3
+#define M66592_BASE_PIPENUM_ISOC	1
+#define M66592_BASE_PIPENUM_INT		6
+
+#define M66592_BASE_BUFNUM	6
+#define M66592_MAX_BUFNUM	0x4F
+
+struct m66592_pipe_info {
+	u16	pipe;
+	u16	epnum;
+	u16	maxpacket;
+	u16	type;
+	u16	interval;
+	u16	dir_in;
+};
+
+struct m66592_request {
+	struct usb_request	req;
+	struct list_head	queue;
+};
+
+struct m66592_ep {
+	struct usb_ep		ep;
+	struct m66592		*m66592;
+
+	struct list_head	queue;
+	unsigned		busy:1;
+	unsigned		internal_ccpl:1;	/* used only for control transfers */
+
+	/* this member is valid only after m66592_enable */
+	unsigned		use_dma:1;
+	u16			pipenum;
+	u16			type;
+	const struct usb_endpoint_descriptor	*desc;
+	/* register address */
+	unsigned long		fifoaddr;
+	unsigned long		fifosel;
+	unsigned long		fifoctr;
+	unsigned long		fifotrn;
+	unsigned long		pipectr;
+};
+
+struct m66592 {
+	spinlock_t		lock;
+	void __iomem		*reg;
+#ifdef CONFIG_HAVE_CLK
+	struct clk *clk;
+#endif
+	struct m66592_platdata	*pdata;
+	unsigned long		irq_trigger;
+
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+
+	struct m66592_ep	ep[M66592_MAX_NUM_PIPE];
+	struct m66592_ep	*pipenum2ep[M66592_MAX_NUM_PIPE];
+	struct m66592_ep	*epaddr2ep[16];
+
+	struct usb_request	*ep0_req;	/* for internal request */
+	__le16			ep0_data;	/* for internal request */
+	u16			old_vbus;
+
+	struct timer_list	timer;
+
+	int			scount;
+
+	int			old_dvsq;
+
+	/* pipe config */
+	int bulk;
+	int interrupt;
+	int isochronous;
+	int num_dma;
+};
+
+#define gadget_to_m66592(_gadget) container_of(_gadget, struct m66592, gadget)
+#define m66592_to_gadget(m66592) (&m66592->gadget)
+
+#define is_bulk_pipe(pipenum)	\
+	((pipenum >= M66592_BASE_PIPENUM_BULK) && \
+	 (pipenum < (M66592_BASE_PIPENUM_BULK + M66592_MAX_NUM_BULK)))
+#define is_interrupt_pipe(pipenum)	\
+	((pipenum >= M66592_BASE_PIPENUM_INT) && \
+	 (pipenum < (M66592_BASE_PIPENUM_INT + M66592_MAX_NUM_INT)))
+#define is_isoc_pipe(pipenum)	\
+	((pipenum >= M66592_BASE_PIPENUM_ISOC) && \
+	 (pipenum < (M66592_BASE_PIPENUM_ISOC + M66592_MAX_NUM_ISOC)))
+
+#define enable_irq_ready(m66592, pipenum)	\
+	enable_pipe_irq(m66592, pipenum, M66592_BRDYENB)
+#define disable_irq_ready(m66592, pipenum)	\
+	disable_pipe_irq(m66592, pipenum, M66592_BRDYENB)
+#define enable_irq_empty(m66592, pipenum)	\
+	enable_pipe_irq(m66592, pipenum, M66592_BEMPENB)
+#define disable_irq_empty(m66592, pipenum)	\
+	disable_pipe_irq(m66592, pipenum, M66592_BEMPENB)
+#define enable_irq_nrdy(m66592, pipenum)	\
+	enable_pipe_irq(m66592, pipenum, M66592_NRDYENB)
+#define disable_irq_nrdy(m66592, pipenum)	\
+	disable_pipe_irq(m66592, pipenum, M66592_NRDYENB)
+
+/*-------------------------------------------------------------------------*/
+static inline u16 m66592_read(struct m66592 *m66592, unsigned long offset)
+{
+	return ioread16(m66592->reg + offset);
+}
+
+static inline void m66592_read_fifo(struct m66592 *m66592,
+		unsigned long offset,
+		void *buf, unsigned long len)
+{
+	void __iomem *fifoaddr = m66592->reg + offset;
+
+	if (m66592->pdata->on_chip) {
+		len = (len + 3) / 4;
+		ioread32_rep(fifoaddr, buf, len);
+	} else {
+		len = (len + 1) / 2;
+		ioread16_rep(fifoaddr, buf, len);
+	}
+}
+
+static inline void m66592_write(struct m66592 *m66592, u16 val,
+				unsigned long offset)
+{
+	iowrite16(val, m66592->reg + offset);
+}
+
+static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat,
+		unsigned long offset)
+{
+	u16 tmp;
+	tmp = m66592_read(m66592, offset);
+	tmp = tmp & (~pat);
+	tmp = tmp | val;
+	m66592_write(m66592, tmp, offset);
+}
+
+#define m66592_bclr(m66592, val, offset)	\
+			m66592_mdfy(m66592, 0, val, offset)
+#define m66592_bset(m66592, val, offset)	\
+			m66592_mdfy(m66592, val, 0, offset)
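+
+/*
+ * Illustrative note (added comment, not in the original driver): the helpers
+ * above implement 16-bit read-modify-write accesses.  A typical use, e.g.
+ * switching a pipe's response PID to BUF, might look like:
+ *
+ *	m66592_mdfy(m66592, M66592_PID_BUF, M66592_PID,
+ *		    get_pipectr_addr(pipenum));
+ *
+ * i.e. clear the bits covered by the M66592_PID mask, then OR in the value.
+ */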
+
+static inline void m66592_write_fifo(struct m66592 *m66592,
+		struct m66592_ep *ep,
+		void *buf, unsigned long len)
+{
+	void __iomem *fifoaddr = m66592->reg + ep->fifoaddr;
+
+	if (m66592->pdata->on_chip) {
+		unsigned long count;
+		unsigned char *pb;
+		int i;
+
+		count = len / 4;
+		iowrite32_rep(fifoaddr, buf, count);
+
+		if (len & 0x00000003) {
+			pb = buf + count * 4;
+			for (i = 0; i < (len & 0x00000003); i++) {
+				if (m66592_read(m66592, M66592_CFBCFG))	/* le */
+					iowrite8(pb[i], fifoaddr + (3 - i));
+				else
+					iowrite8(pb[i], fifoaddr + i);
+			}
+		}
+	} else {
+		unsigned long odd = len & 0x0001;
+
+		len = len / 2;
+		iowrite16_rep(fifoaddr, buf, len);
+		if (odd) {
+			unsigned char *p = buf + len*2;
+			if (m66592->pdata->wr0_shorted_to_wr1)
+				m66592_bclr(m66592, M66592_MBW_16, ep->fifosel);
+			iowrite8(*p, fifoaddr);
+			if (m66592->pdata->wr0_shorted_to_wr1)
+				m66592_bset(m66592, M66592_MBW_16, ep->fifosel);
+		}
+	}
+}
+
+#endif	/* ifndef __M66592_UDC_H__ */
+
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/mass_storage.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/mass_storage.c
new file mode 100644
index 0000000..150c0ab
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/mass_storage.c
@@ -0,0 +1,183 @@
+/*
+ * mass_storage.c -- Mass Storage USB Gadget
+ *
+ * Copyright (C) 2003-2008 Alan Stern
+ * Copyright (C) 2009 Samsung Electronics
+ *                    Author: Michal Nazarewicz <mina86@mina86.com>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+
+/*
+ * The Mass Storage Gadget acts as a USB Mass Storage device,
+ * appearing to the host as a disk drive or as a CD-ROM drive.  In
+ * addition to providing an example of a genuinely useful gadget
+ * driver for a USB device, it also illustrates a technique of
+ * double-buffering for increased throughput.  Last but not least, it
+ * gives an easy way to probe the behavior of the Mass Storage drivers
+ * in a USB host.
+ *
+ * This file serves only administrative purposes; all the business
+ * logic is implemented in the f_mass_storage.* files.  Read the
+ * comments there for a more detailed description.
+ */
+
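+/*
+ * Usage sketch (added comment, not part of the original file): the gadget
+ * is normally loaded with at least one backing file or block device, e.g.
+ *
+ *	modprobe g_mass_storage file=/path/to/backing.img removable=1
+ *
+ * The module parameters (file, ro, removable, stall, ...) are defined by
+ * FSG_MODULE_PARAMETERS() in f_mass_storage.c, which is #included below.
+ */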
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/usb/ch9.h>
+
+
+/*-------------------------------------------------------------------------*/
+
+#define DRIVER_DESC		"Mass Storage Gadget"
+#define DRIVER_VERSION		"2009/09/11"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "f_mass_storage.c"
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor msg_device_desc = {
+	.bLength =		sizeof msg_device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		cpu_to_le16(0x0200),
+	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
+
+	/* Vendor and product id can be overridden by module parameters.  */
+	.idVendor =		cpu_to_le16(FSG_VENDOR_ID),
+	.idProduct =		cpu_to_le16(FSG_PRODUCT_ID),
+	.bNumConfigurations =	1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/*
+	 * REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
+
+/****************************** Configurations ******************************/
+
+static struct fsg_module_parameters mod_data = {
+	.stall = 1
+};
+FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
+
+static unsigned long msg_registered;
+static void msg_cleanup(void);
+
+static int msg_thread_exits(struct fsg_common *common)
+{
+	msg_cleanup();
+	return 0;
+}
+
+static int __init msg_do_config(struct usb_configuration *c)
+{
+	static const struct fsg_operations ops = {
+		.thread_exits = msg_thread_exits,
+	};
+	static struct fsg_common common;
+
+	struct fsg_common *retp;
+	struct fsg_config config;
+	int ret;
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	fsg_config_from_params(&config, &mod_data);
+	config.ops = &ops;
+
+	retp = fsg_common_init(&common, c->cdev, &config);
+	if (IS_ERR(retp))
+		return PTR_ERR(retp);
+
+	ret = fsg_bind_config(c->cdev, c, &common);
+	fsg_common_put(&common);
+	return ret;
+}
+
+static struct usb_configuration msg_config_driver = {
+	.label			= "Linux File-Backed Storage",
+	.bConfigurationValue	= 1,
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+};
+
+
+/****************************** Gadget Bind ******************************/
+
+static int __init msg_bind(struct usb_composite_dev *cdev)
+{
+	int status;
+
+	printk(KERN_DEBUG "g_mass_storage: msg_bind\n");
+	status = usb_add_config(cdev, &msg_config_driver, msg_do_config);
+	if (status < 0)
+		return status;
+
+	dev_info(&cdev->gadget->dev,
+		 DRIVER_DESC ", version: " DRIVER_VERSION "\n");
+	set_bit(0, &msg_registered);
+	return 0;
+}
+
+
+/****************************** Some noise ******************************/
+
+static struct usb_composite_driver msg_driver = {
+	.name		= "g_mass_storage",
+	.dev		= &msg_device_desc,
+	.iProduct	= DRIVER_DESC,
+	.max_speed	= USB_SPEED_SUPER,
+	.needs_serial	= 1,
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Michal Nazarewicz");
+MODULE_LICENSE("GPL");
+
+static int __init msg_init(void)
+{
+	return usb_composite_probe(&msg_driver, msg_bind);
+}
+module_init(msg_init);
+
+static void msg_cleanup(void)
+{
+	if (test_and_clear_bit(0, &msg_registered))
+		usb_composite_unregister(&msg_driver);
+}
+module_exit(msg_cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/mbim.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/mbim.h
new file mode 100755
index 0000000..6e0edf5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/mbim.h
@@ -0,0 +1,160 @@
+/*******************************************************************************
+ * Copyright (C) 2007, ZTE Corporation.
+ *
+ * File Name:
+ * File Mark:
+ * Description:
+ * Others:
+ * Version:       1.0
+ * Date:          2020-12-30
+ * History 1:
+ *     Date:
+ *     Version:
+ *     Author:
+ *     Modification:
+ * History 2:
+ ********************************************************************************/

+

+

+

+#define    MBIM_OPEN_MSG              0x00000001

+#define    MBIM_CLOSE_MSG             0x00000002

+#define    MBIM_COMMAND_MSG           0x00000003

+#define    MBIM_HOST_ERROR_MSG        0x00000004

+#define    MBIM_OPEN_DONE             0x80000001

+#define    MBIM_CLOSE_DONE            0x80000002

+#define    MBIM_COMMAND_DONE          0x80000003

+#define    MBIM_FUNCTION_ERROR_MSG    0x80000004

+#define    MBIM_INDICATE_STATUS_MSG   0x80000007

+

+

+#define    MBIM_CID_DEVICE_CAPS  	1

+#define    MBIM_CID_SUBSCRIBER_READY_STATUS  	2

+#define    MBIM_CID_RADIO_STATE 	3

+#define    MBIM_CID_PIN	4

+#define    MBIM_CID_PIN_LIST  	5

+#define    MBIM_CID_HOME_PROVIDER  	6

+#define    MBIM_CID_PREFERRED_PROVIDERS  	7

+#define    MBIM_CID_VISIBLE_PROVIDERS  	8

+#define    MBIM_CID_REGISTER_STATE 	9

+#define    MBIM_CID_PACKET_SERVICE  	10

+#define    MBIM_CID_SIGNAL_STATE  	11

+#define    MBIM_CID_CONNECT  	12

+#define    MBIM_CID_PROVISIONED_CONTEXTS 	13

+#define    MBIM_CID_SERVICE_ACTIVATION  	14

+#define    MBIM_CID_IP_CONFIGURATION 	15

+#define    MBIM_CID_DEVICE_SERVICES  	16

+#define    MBIM_CID_DEVICE_SERVICE_SUBSCRIBE_LIST  	19

+#define    MBIM_CID_PACKET_STATISTICS  	20

+#define    MBIM_CID_NETWORK_IDLE_HINT  	21

+#define    MBIM_CID_EMERGENCY_MODE  	22

+#define    MBIM_CID_IP_PACKET_FILTERS  	23

+#define    MBIM_CID_MULTICARRIER_PROVIDERS  	24

+#define    MBIM_CID_SMS_CONFIGURATION  	1

+#define    MBIM_CID_SMS_READ  2

+#define    MBIM_CID_SMS_SEND  	3

+#define    MBIM_CID_SMS_DELETE  	4

+#define    MBIM_CID_SMS_MESSAGE_STORE_STATUS	5

+#define    MBIM_CID_USSD  	1

+#define    MBIM_CID_PHONEBOOK_CONFIGURATION  	1

+#define    MBIM_CID_PHONEBOOK_READ  	2

+#define    MBIM_CID_PHONEBOOK_DELETE  	3

+#define    MBIM_CID_PHONEBOOK_WRITE  	4

+#define    MBIM_CID_STK_PAC  	1

+#define    MBIM_CID_STK_TERMINAL_RESPONSE  	2

+#define    MBIM_CID_STK_ENVELOPE  3

+#define    MBIM_CID_AKA_AUTH 	1

+#define    MBIM_CID_AKAP_AUTH 	2

+#define    MBIM_CID_SIM_AUTH 	3

+#define    MBIM_CID_DSS_CONNECT 	1
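+
+/*
+ * Note (comment added for clarity): MBIM CIDs are scoped to a device-service
+ * UUID, which is why the numbering above restarts at 1 for the SMS, USSD,
+ * phonebook, STK, authentication and DSS service groups.
+ */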

+

+

+#define MBIM_UUID_LEN  16

+

+typedef struct mbim_indicate_status_msg_type

+{

+	u32	MessageType;

+	u32	MessageLength;

+	u32	TransactionId;

+	u32	TotalFragments;

+	u32	CurrentFragment;

+	u8	DeviceServiceId[MBIM_UUID_LEN];

+	u32	CID;

+	u32	InformationBufferLength;

+	u32	InformationBuffer;

+} mbim_indicate_status_msg_type;

+

+typedef struct mbim_open_msg_type

+{

+	__le32	MessageType;

+	__le32	MessageLength;

+	__le32	TransactionId;

+	__le32	MaxControlTransfer;

+} mbim_open_msg_type;

+

+

+typedef struct mbim_close_msg_type

+{

+	__le32	MessageType;

+	__le32	MessageLength;

+	__le32	TransactionId;

+} mbim_close_msg_type;

+

+

+typedef struct mbim_command_msg_type

+{

+	u32	MessageType;

+	u32	MessageLength;

+	u32	TransactionId;

+	u32	TotalFragments;

+	u32	CurrentFragment;

+	u8	DeviceServiceId[MBIM_UUID_LEN];

+	u32	CID;

+	u32	Commandtype;

+	u32	InformationBufferLength;

+	u32	InformationBuffer;

+} mbim_command_msg_type;

+

+

+

+

+struct mbim_ncm_info{

+    uint16_t  nth_block_len ; 

+    //uint16_t  ndp_head_len ; 

+    uint16_t  ndp_datagram_off ; 

+    uint16_t  ndp_datagram_len ; 

+

+};

+

+struct mbim_ntb_aligned_info 

+{

+

+	uint16_t	wNdpInDivisor;

+	uint16_t	wNdpInPayloadRemainder;

+	uint16_t	wNdpInAlignment;

+	uint16_t	wNdpOutDivisor;

+	uint16_t	wNdpOutPayloadRemainder;

+	uint16_t	wNdpOutAlignment;

+} ;

+

+int mbim_register(void (*resp_avail)(void *v), void *v);
+int mbim_get_nth16_and_ndp16_size(void);
+int mbim_ncm16_and_ndp16_init(char *buf, int len);
+int mbim_fill_ncm16_vary_head_info(char *buf, struct mbim_ncm_info *info);
+int mbim_get_first_ndp16_offset(struct usb_request *req);
+int mbim_get_next_datagram_fragment(struct usb_request *req, int prev_ndp_off, int *cur_first_datagram, int *next_ndp);
+int mbim_get_reverse_head_size(void);
+int mbim_get_trans_buffer_size(void);
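+
+/*
+ * Background (comment added for clarity, based on the NCM/MBIM 16-bit NTB
+ * framing): a transfer block starts with an NTH16 header, the datagrams
+ * follow, and one or more NDP16 descriptors carry (offset, length) pairs
+ * for the datagrams, chained through a "next NDP" index.
+ * mbim_get_first_ndp16_offset() and mbim_get_next_datagram_fragment()
+ * walk that chain when de-aggregating received transfers.
+ */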

+

+

+

+

+

+

+

+

+

+

+

+

+

diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/multi.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/multi.c
new file mode 100644
index 0000000..c37fb33
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/multi.c
@@ -0,0 +1,362 @@
+/*
+ * multi.c -- Multifunction Composite driver
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz (mina86@mina86.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/module.h>
+
+
+#if defined USB_ETH_RNDIS
+#  undef USB_ETH_RNDIS
+#endif
+#ifdef CONFIG_USB_G_MULTI_RNDIS
+#  define USB_ETH_RNDIS y
+#endif
+
+
+#define DRIVER_DESC		"Multifunction Composite Gadget"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Michal Nazarewicz");
+MODULE_LICENSE("GPL");
+
+
+/***************************** All the files... *****************************/
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "f_mass_storage.c"
+
+#include "u_serial.c"
+#include "f_acm.c"
+
+#include "f_ecm.c"
+#include "f_subset.c"
+#ifdef USB_ETH_RNDIS
+#  include "f_rndis.c"
+#  include "rndis.c"
+#endif
+#include "u_ether.c"
+
+
+
+/***************************** Device Descriptor ****************************/
+
+#define MULTI_VENDOR_NUM	0x1d6b	/* Linux Foundation */
+#define MULTI_PRODUCT_NUM	0x0104	/* Multifunction Composite Gadget */
+
+
+enum {
+	__MULTI_NO_CONFIG,
+#ifdef CONFIG_USB_G_MULTI_RNDIS
+	MULTI_RNDIS_CONFIG_NUM,
+#endif
+#ifdef CONFIG_USB_G_MULTI_CDC
+	MULTI_CDC_CONFIG_NUM,
+#endif
+};
+
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		cpu_to_le16(0x0200),
+
+	.bDeviceClass =		USB_CLASS_MISC /* 0xEF */,
+	.bDeviceSubClass =	2,
+	.bDeviceProtocol =	1,
+
+	/* Vendor and product id can be overridden by module parameters.  */
+	.idVendor =		cpu_to_le16(MULTI_VENDOR_NUM),
+	.idProduct =		cpu_to_le16(MULTI_PRODUCT_NUM),
+};
+
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &(struct usb_otg_descriptor){
+		.bLength =		sizeof(struct usb_otg_descriptor),
+		.bDescriptorType =	USB_DT_OTG,
+
+		/*
+		 * REVISIT SRP-only hardware is possible, although
+		 * it would not be called "OTG" ...
+		 */
+		.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+	},
+	NULL,
+};
+
+
+enum {
+#ifdef CONFIG_USB_G_MULTI_RNDIS
+	MULTI_STRING_RNDIS_CONFIG_IDX,
+#endif
+#ifdef CONFIG_USB_G_MULTI_CDC
+	MULTI_STRING_CDC_CONFIG_IDX,
+#endif
+};
+
+static struct usb_string strings_dev[] = {
+#ifdef CONFIG_USB_G_MULTI_RNDIS
+	[MULTI_STRING_RNDIS_CONFIG_IDX].s = "Multifunction with RNDIS",
+#endif
+#ifdef CONFIG_USB_G_MULTI_CDC
+	[MULTI_STRING_CDC_CONFIG_IDX].s   = "Multifunction with CDC ECM",
+#endif
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&(struct usb_gadget_strings){
+		.language	= 0x0409,	/* en-us */
+		.strings	= strings_dev,
+	},
+	NULL,
+};
+
+
+
+
+/****************************** Configurations ******************************/
+
+static struct fsg_module_parameters fsg_mod_data = { .stall = 1 };
+FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
+
+static struct fsg_common fsg_common;
+
+static u8 hostaddr[ETH_ALEN];
+
+
+/********** RNDIS **********/
+
+#ifdef USB_ETH_RNDIS
+
+static __init int rndis_do_config(struct usb_configuration *c)
+{
+	int ret;
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	ret = rndis_bind_config(c, hostaddr);
+	if (ret < 0)
+		return ret;
+
+	ret = acm_bind_config(c, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = fsg_bind_config(c->cdev, c, &fsg_common);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int rndis_config_register(struct usb_composite_dev *cdev)
+{
+	static struct usb_configuration config = {
+		.bConfigurationValue	= MULTI_RNDIS_CONFIG_NUM,
+		.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+	};
+
+	config.label          = strings_dev[MULTI_STRING_RNDIS_CONFIG_IDX].s;
+	config.iConfiguration = strings_dev[MULTI_STRING_RNDIS_CONFIG_IDX].id;
+
+	return usb_add_config(cdev, &config, rndis_do_config);
+}
+
+#else
+
+static int rndis_config_register(struct usb_composite_dev *cdev)
+{
+	return 0;
+}
+
+#endif
+
+
+/********** CDC ECM **********/
+
+#ifdef CONFIG_USB_G_MULTI_CDC
+
+static __init int cdc_do_config(struct usb_configuration *c)
+{
+	int ret;
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	ret = ecm_bind_config(c, hostaddr);
+	if (ret < 0)
+		return ret;
+
+	ret = acm_bind_config(c, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = fsg_bind_config(c->cdev, c, &fsg_common);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int cdc_config_register(struct usb_composite_dev *cdev)
+{
+	static struct usb_configuration config = {
+		.bConfigurationValue	= MULTI_CDC_CONFIG_NUM,
+		.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+	};
+
+	config.label          = strings_dev[MULTI_STRING_CDC_CONFIG_IDX].s;
+	config.iConfiguration = strings_dev[MULTI_STRING_CDC_CONFIG_IDX].id;
+
+	return usb_add_config(cdev, &config, cdc_do_config);
+}
+
+#else
+
+static int cdc_config_register(struct usb_composite_dev *cdev)
+{
+	return 0;
+}
+
+#endif
+
+
+
+/****************************** Gadget Bind ******************************/
+
+
+static int __ref multi_bind(struct usb_composite_dev *cdev)
+{
+	struct usb_gadget *gadget = cdev->gadget;
+	int status, gcnum;
+
+	if (!can_support_ecm(cdev->gadget)) {
+		dev_err(&gadget->dev, "controller '%s' not usable\n",
+		        gadget->name);
+		return -EINVAL;
+	}
+
+	/* set up network link layer */
+	status = gether_setup(cdev->gadget, hostaddr);
+	if (status < 0)
+		return status;
+
+	/* set up serial link layer */
+	status = gserial_setup(cdev->gadget, 1);
+	if (status < 0)
+		goto fail0;
+
+	/* set up mass storage function */
+	{
+		void *retp;
+		retp = fsg_common_from_params(&fsg_common, cdev, &fsg_mod_data);
+		if (IS_ERR(retp)) {
+			status = PTR_ERR(retp);
+			goto fail1;
+		}
+	}
+
+	/* set bcdDevice */
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0) {
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+	} else {
+		WARNING(cdev, "controller '%s' not recognized\n", gadget->name);
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099);
+	}
+
+	/* allocate string IDs */
+	status = usb_string_ids_tab(cdev, strings_dev);
+	if (unlikely(status < 0))
+		goto fail2;
+
+	/* register configurations */
+	status = rndis_config_register(cdev);
+	if (unlikely(status < 0))
+		goto fail2;
+
+	status = cdc_config_register(cdev);
+	if (unlikely(status < 0))
+		goto fail2;
+
+	/* we're done */
+	dev_info(&gadget->dev, DRIVER_DESC "\n");
+	fsg_common_put(&fsg_common);
+	return 0;
+
+
+	/* error recovery */
+fail2:
+	fsg_common_put(&fsg_common);
+fail1:
+	gserial_cleanup();
+fail0:
+	gether_cleanup();
+	return status;
+}
+
+static int __exit multi_unbind(struct usb_composite_dev *cdev)
+{
+	gserial_cleanup();
+	gether_cleanup();
+	return 0;
+}
+
+
+/****************************** Some noise ******************************/
+
+
+static struct usb_composite_driver multi_driver = {
+	.name		= "g_multi",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.max_speed	= USB_SPEED_HIGH,
+	.unbind		= __exit_p(multi_unbind),
+	.iProduct	= DRIVER_DESC,
+	.needs_serial	= 1,
+};
+
+
+static int __init multi_init(void)
+{
+	return usb_composite_probe(&multi_driver, multi_bind);
+}
+module_init(multi_init);
+
+static void __exit multi_exit(void)
+{
+	usb_composite_unregister(&multi_driver);
+}
+module_exit(multi_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/multi_packet.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/multi_packet.c
new file mode 100755
index 0000000..eab1ea0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/multi_packet.c
@@ -0,0 +1,1962 @@
+#include <mach/dma.h>

+#include "linux/dmaengine.h"

+

+#include <linux/kthread.h>

+//#include <linux/Timer.h>

+#include <linux/semaphore.h>

+#include <linux/dma-mapping.h>

+#include <net/SI/ext_mem.h>

+#include <linux/skbuff.h>

+#include <linux/completion.h>

+#include <linux/wait.h>

+#include "multi_packet.h"

+#include "rndis.h"

+#include "mbim.h"

+

+//#include <linux/android_notify.h>

+#include <mach/highspeed_debug.h>

+#include <mach/dma_cfg.h>

+#include <linux/delay.h>

+

+

+#include <linux/slab.h>

+#include <linux/kernel.h>

+#include <linux/device.h>

+#include <linux/etherdevice.h>

+#include <linux/list.h>

+#include <linux/atomic.h>

+#include <linux/miscdevice.h>

+#include <linux/vmalloc.h>

+#include <linux/crc32.h>

+#include <linux/if_vlan.h>

+#include <asm-generic/ioctl.h>

+

+

+extern unsigned int get_panic_flag(void);

+extern unsigned int usb_get_rndis_list_max_flag(void);

+extern int mbim_get_work_mode(void);

+

+

+#define MULTIPACKET_MAXNUM		10

+

+#define DMA_LINK_LIST_MAX_NUM		MULTIPACKET_MAXNUM

+#define DMA_ADDR_INVALID	(~(dma_addr_t)0)

+

+#define RNDIS_MSGHEAD_SIZE				sizeof(struct rndis_packet_msg_type)

+#define RNDIS_PACKET_ALIGNMENT_FACTOR	1

+#define RNDIS_PACKET_ALIGNMENT			(2<<RNDIS_PACKET_ALIGNMENT_FACTOR)

+#define RNDIS_ALIGN(X)		(((X)+RNDIS_MSGHEAD_SIZE+RNDIS_PACKET_ALIGNMENT-1)&~(int)(RNDIS_PACKET_ALIGNMENT-1))
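+
+/*
+ * Example (comment added for clarity, assuming the standard 44-byte
+ * rndis_packet_msg_type header): with 4-byte alignment,
+ * RNDIS_ALIGN(1514) == (1514 + 44 + 3) & ~3 == 1560, i.e. a full
+ * Ethernet frame occupies 1560 bytes in the aggregated transfer.
+ */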

+

+#define USB_BIND_DMA_BUF_NUM	 8	//8*16*1024	/* RX and TX up to 240K each */

+#define USB_UNBIND_DMA_BUF_NUM	 6	//6*16*1024	/* RX and TX up to 240K each */

+

+#if MULTIPACKET_BUF_ALLOC

+#else

+#define USB_DMA_TX_BUF_ADDR  DMA_RAM_FOR_USB_ADDR_BASE 

+#define USB_DMA_RX_BUF_ADDR  DMA_RAM_FOR_USB_ADDR_BASE + USB_DMA_BUF_SIZE

+#endif

+	

+#define USB_MAX_BUF_NUM		16     //336		

+#define USB_RNDIS_PKT_MAXSIZE   1568

+#define USB_VIRTUAL_PACKET_MAXSIZE   USB_RNDIS_PKT_MAXSIZE

+#define PACKET_BUF_EXTRA_NUM    6

+

+

+#define USB_BIND_TIMER_EXPIRES			10/5

+#define MIN(x,y)		  		((x) < (y) ? (x) : (y))

+

+

+

+#define USE_DMA_TRANSFER 1

+

+#define USE_ONLY_LIST	0

+

+#define  RNDIS_NAME_STR  "rndis"

+#define  MBIM_NAME_STR   "mbim"

+
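+/*
+ * Note (comment added for clarity, inferred from the code below): one
+ * usb_multi_packet instance exists per direction.  "bind" is the IN/TX
+ * side, which aggregates several per-frame requests from the virtual NIC
+ * into one USB transfer through ->wrap(); "unbind" is the OUT/RX side,
+ * which splits one received USB transfer back into per-frame requests
+ * through ->unwrap().  The copies are performed as scatter DMA described
+ * by dmaChannelCfg[].
+ */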

+struct usb_multi_packet {

+

+	int	numInTrans;	

+	struct usb_ep		*ep;

+

+	unsigned	int 		dmaChannelAlloc;

+	struct dma_chan * pdmaChannel;

+	//dma_channel_def	dmaChannelCfg[DMA_LINK_LIST_MAX_NUM];

+	//struct completion	dmaCmplt;	

+	struct semaphore	dmaSem;

+	

+	/* vnic packet list */

+	struct list_head	vincPkt_list;

+	atomic_t			vnicPkt_num;	

+	spinlock_t		vnicPkt_spinLock;	

+

+	atomic_t			count;	

+	/* usb buf list*/

+	struct list_head	usbBuf_list;

+	atomic_t		usbBuft_num;	

+	spinlock_t		usbBuf_spinLock;	/* guard usb pool buf list */

+#if 1//USE_ONLY_LIST

+	//struct usb_request * reqNode[USB_MAX_BUF_NUM];

+	volatile int	reqRdPos;	

+	volatile int	reqWrPos;	

+	volatile int	reqNum;	

+	spinlock_t	reqSpinLock;	/* guard usb pool buf list */

+	wait_queue_head_t reqWait;	

+#endif

+	wait_queue_head_t wait;

+	struct task_struct	*thread;

+

+	struct timer_list	timer; 	

+#if MULTIPACKET_BUF_ALLOC

+	int buf_alloc_state;

+	int dma_running;

+#endif

+#ifdef PKT_UNBIND

+	int pkt_num;

+#endif

+    uint8_t net_type;   /* 0: unknown, 1: RNDIS NIC, 2: MBIM NIC */

+    int (*wrap) (struct usb_multi_packet * , struct usb_request *) ;

+    int (*unwrap)(struct usb_multi_packet * , struct usb_request *) ;

+    int maxPacketNum ;

+    struct usb_request ** reqNode;

+    dma_channel_def	*dmaChannelCfg;

+    unsigned trans_buffer_size ;

+    

+    

+};

+

+struct multi_packet{

+

+	volatile int maxPacketNum; 

+

+	unsigned int active;

+	

+	struct usb_gadget *gadget;	

+	struct gether *geth;

+	struct usb_multi_packet bind;

+	struct usb_multi_packet unbind;

+    void *alloc_mem ;

+};

+

+

+static struct multi_packet multiPacket = {0};

+//static struct usb_multi_packet *bind = NULL;

+//static struct usb_multi_packet *unbind = NULL;

+

+//unsigned int g_VNIC_MultiPacket_MaxNum = 10;

+int multi_packet_get_maxnum(void);

+

+extern void dev_kfree_skb_any(struct sk_buff *skb);

+extern int get_vnic_multi_packet_num(void);

+

+void dma_callback(void *data)

+{

+	struct usb_multi_packet *dev = (struct usb_multi_packet *)data;

+#if MULTIPACKET_BUF_ALLOC

+	dev->dma_running = 0;

+#endif

+	up(&dev->dmaSem);

+	//complete(&dev->dmaCmplt);

+	//USBSTACK_DBG("dma callback");

+}

+

+

+int dma_Scatter_Trans(struct usb_multi_packet * dev, unsigned int Cnt )

+{

+#if USE_DMA_TRANSFER

+	signed int ret = 0;	

+	struct dma_async_tx_descriptor *desc =NULL;

+	dev->dmaChannelCfg[Cnt-1].link_addr = 0;

+	ret=dmaengine_slave_config(dev->pdmaChannel, 

+						(struct dma_slave_config*)dev->dmaChannelCfg);

+

+	if (ret!= 0)

+	{

+		USBSTACK_DBG("dma config err: %d ", ret);

+		USB_ASSERT(0," DMA ");

+		return ret;

+	}

+

+	/* start transfer */

+

+	desc = dev->pdmaChannel->device->device_prep_interleaved_dma(dev->pdmaChannel,NULL,0);

+	desc->callback = (dma_async_tx_callback)dma_callback;

+	desc->callback_param = (void *) dev;

+

+	/*zx29_chan->zx29_dma_cookie = */dmaengine_submit(desc);

+	dma_async_issue_pending(dev->pdmaChannel);

+#if MULTIPACKET_BUF_ALLOC

+	dev->dma_running = 1;

+#endif

+

+	//wait_for_completion_interruptible(&dev->dmaCmplt);

+	down(&dev->dmaSem);

+	return ret;		

+

+#else

+	int i = 0;

+	for( i=0;i<Cnt;i++)

+	{

+		memcpy(dev->dmaChannelCfg[i].dest_addr, 

+				dev->dmaChannelCfg[i].src_addr, 

+				dev->dmaChannelCfg[i].count);

+	}

+	return 0;

+#endif

+}

+

+static bool dma_filterFn(struct dma_chan *chan, void *param)

+{

+	return true;

+}

+

+

+void clean_vnic_packet_list(struct usb_multi_packet *multiPkt)

+{

+    unsigned long flags;

+	struct usb_request	*req = NULL;

+

+	spin_lock_irqsave(&multiPkt->vnicPkt_spinLock, flags);

+	while (!list_empty(&multiPkt->vincPkt_list))

+	{

+		req = container_of(multiPkt->vincPkt_list.next, struct usb_request, list);

+		list_del(&req->list);

+        spin_unlock_irqrestore(&multiPkt->vnicPkt_spinLock, flags);

+#if 0//USE_DMA_TRANSFER

+		if(psbuff_virt_to_phys(req->buf, &req->dma) == 0)

+		{

+			usb_gadget_unmap_request(multiPacket.gadget, req, 1);

+		}

+#endif

+		dev_kfree_skb_any(req->context);

+		usb_ep_free_request(multiPkt->ep, req);

+		spin_lock_irqsave(&multiPkt->vnicPkt_spinLock, flags);

+	}

+	spin_unlock_irqrestore(&multiPkt->vnicPkt_spinLock, flags);

+}

+

+void mbim_change_rx_complete(usb_complete_t __complete)

+{

+

+    unsigned long flags;

+	struct usb_request	*req = NULL;

+	struct usb_multi_packet *punbind = &multiPacket.unbind;

+	int cnt = 0;

+	struct list_head *pkt_head = &punbind->vincPkt_list;

+	struct list_head *pkt_next = pkt_head->next;

+	spin_lock_irqsave(&punbind->vnicPkt_spinLock, flags);

+	if(list_empty(&punbind->vincPkt_list)){

+		spin_unlock_irqrestore(&punbind->vnicPkt_spinLock, flags);

+		printk("mbim_change_rx_complete, vincPkt_list\r\n");

+		

+		return;

+	}

+	while (pkt_next != pkt_head)

+	{

+		req = container_of(pkt_next, struct usb_request, list);

+		req->complete = __complete;

+		cnt++;

+		pkt_next = pkt_next->next;

+	}

+

+	spin_unlock_irqrestore(&punbind->vnicPkt_spinLock, flags);

+	printk("mbim_change_rx_complete, cnt:%d, complete:%p\r\n", cnt, __complete);

+}

+

+void u_ether_rx_vnic_packet_list(void)

+{

+    unsigned long flags;

+	struct usb_request	*req = NULL;

+	struct usb_multi_packet *punbind = &multiPacket.unbind;

+

+    if(multi_packet_get_maxnum() <=1 )

+        return;

+

+	spin_lock_irqsave(&punbind->vnicPkt_spinLock, flags);

+	while (!list_empty(&punbind->vincPkt_list))

+	{

+		req = container_of(punbind->vincPkt_list.next, struct usb_request, list);

+		list_del(&req->list);

+		spin_unlock_irqrestore(&punbind->vnicPkt_spinLock, flags);

+        atomic_dec(&punbind->vnicPkt_num);

+		req->status = -ESHUTDOWN;

+		req->complete(punbind->ep, req);

+		spin_lock_irqsave(&punbind->vnicPkt_spinLock, flags);

+	}

+    /* clear the data held in the large-packet (aggregation) ring */

+    punbind->reqRdPos = 0;

+    punbind->reqWrPos = 0;

+	spin_unlock_irqrestore(&punbind->vnicPkt_spinLock, flags);

+}

+void u_ether_tx_vnic_packet_list(void)

+{

+    unsigned long		flags;

+	struct usb_request	*req = NULL;

+    struct usb_multi_packet *bind = &multiPacket.bind;

+

+    if(multi_packet_get_maxnum() <=1 )

+        return;

+

+	spin_lock_irqsave(&bind->vnicPkt_spinLock, flags);

+	while (!list_empty(&bind->vincPkt_list))

+	{

+		req = container_of(bind->vincPkt_list.next, struct usb_request, list);

+		list_del(&req->list);

+		spin_unlock_irqrestore(&bind->vnicPkt_spinLock, flags);

+        atomic_dec(&bind->vnicPkt_num);

+		req->status = -ESHUTDOWN;

+		req->complete(bind->ep, req);

+		spin_lock_irqsave(&bind->vnicPkt_spinLock, flags);

+	}

+    bind->reqRdPos = 0;

+	bind->reqWrPos = 0;

+	spin_unlock_irqrestore(&bind->vnicPkt_spinLock, flags);

+}

+

+/***************************************************************************
+ *			RNDIS NIC protocol: packet aggregation / de-aggregation
+ ****************************************************************************/
+/* RNDIS de-aggregation (unwrap) function */

+int rndis_unwrap(struct usb_multi_packet * unbind, struct usb_request *req)

+{

+    unsigned long flags;

+	int tempLength = req->actual;

+	unsigned char * tempBufAddr = req->buf;

+	unsigned char * tempDmaAddr = req->dma;

+	

+	struct rndis_packet_msg_type *head = NULL;

+	struct usb_request *temp_req= NULL;

+

+	int index = 0;

+	struct list_head *temp_list =  &unbind->vincPkt_list;

+

+#if MULTIPACKET_BUF_ALLOC

+	if((req->buf == NULL) || (req->dma == NULL))

+	{

+		USBSTACK_DBG("req buf or dma is null\n");

+		return 0;

+	}

+#endif

+	if(req->status !=0)

+	{

+		/* check whether the request completed successfully */

+		//USB_ASSERT(0, "REQ: 0x%x", req);

+		USBSTACK_DBG("req->status: %d", req->status);

+		return req->status;

+	}

+	

+	spin_lock_irqsave(&unbind->vnicPkt_spinLock, flags);

+	if(list_empty(&unbind->vincPkt_list)){

+		USBSTACK_DBG("%s unbind vincPkt list NULL", __func__);

+		temp_list = NULL;

+	}

+	spin_unlock_irqrestore(&unbind->vnicPkt_spinLock, flags);

+		

+	if(temp_list == NULL){

+		USBSTACK_DBG("rndis_unwrap vnic Pktlist is NULL");

+		return 0;	

+	}

+	

+	do

+	{

+#if MULTIPACKET_BUF_ALLOC

+		/* if the req buffer is NULL while unwrapping, bail out */

+		if(tempBufAddr == NULL){

+			USBSTACK_DBG("req is freel\n");

+			break;

+		}

+#endif

+		head = (struct rndis_packet_msg_type *)tempBufAddr;

+		if(head->MessageType != cpu_to_le32(REMOTE_NDIS_PACKET_MSG))

+		{

+			/* header check failed, drop the packet */

+			USBSTACK_DBG("USB RNDIS HEAD ERR :%0x\n",head->MessageType);

+ 			break;

+		}

+

+		if(tempLength < (unsigned short)head->MessageLength)

+		{

+			/* remaining length too short, drop the packet */

+			USBSTACK_DBG("USB RNDIS DATA LENGTH ERR templen:%d,msg len:%d\n",tempLength,(unsigned short)head->MessageLength);

+			break;

+		}

+

+		spin_lock_irqsave(&unbind->vnicPkt_spinLock, flags);

+		if(list_empty(&unbind->vincPkt_list)){

+			USBSTACK_DBG("%s unbind vincPkt list NULL", __func__);

+			temp_list = NULL;

+		}

+		else

+			temp_list = temp_list->next;		

+		spin_unlock_irqrestore(&unbind->vnicPkt_spinLock, flags);

+		

+		if(temp_list == &unbind->vincPkt_list){

+			USBSTACK_DBG("unbind get the head of list\n");

+			return index;

+		}

+		

+		if(temp_list == NULL){

+			if(kthread_should_stop()){

+				USBSTACK_DBG("unbind is kill and unwrap exit");

+               return index;

+			}else{

+#if 0

+                msleep(2);

+                continue;

+#else

+

+               return index;

+#endif

+			}

+		}

+

+		unbind->dmaChannelCfg[index].count = (unsigned short)head->DataLength;

+

+#if USE_DMA_TRANSFER

+		unbind->dmaChannelCfg[index].link_addr = 1;

+		unbind->dmaChannelCfg[index].src_addr = (unsigned int)(tempDmaAddr + 8  \

+												+ head->DataOffset);

+#else

+		unbind->dmaChannelCfg[index].src_addr = (unsigned int)(tempBufAddr + 8  \

+												+ head->DataOffset);

+#endif

+

+		++index;

+

+	

+		temp_req = container_of(temp_list, struct usb_request, list);

+		

+#if USE_DMA_TRANSFER

+		unbind->dmaChannelCfg[index-1].dest_addr = (unsigned int)temp_req->dma;

+#else

+		unbind->dmaChannelCfg[index-1].dest_addr = (unsigned int)temp_req->buf;

+#endif

+

+		temp_req->actual = (unsigned short)head->DataLength;

+

+		tempLength -= (unsigned short)head->MessageLength;

+		if(tempLength <= RNDIS_MSGHEAD_SIZE)

+		{

+			break;   /* unwrapping finished */

+		}

+

+		tempBufAddr += (unsigned short)head->MessageLength;

+		tempDmaAddr += (unsigned short)head->MessageLength;

+	    	

+	}while(unbind->maxPacketNum -index);

+		

+	return index;

+}

+

+/* RNDIS wrap helper: prepend the RNDIS packet header */

+int rndis_add_head(unsigned int msgAddr, unsigned msgLen)

+{

+	struct rndis_packet_msg_type *head = NULL;

+

+	head = (struct rndis_packet_msg_type *)msgAddr;

+

+	memset((void*)head, 0, RNDIS_MSGHEAD_SIZE);

+	head->MessageType = cpu_to_le32(REMOTE_NDIS_PACKET_MSG);

+	head->MessageLength = cpu_to_le32(RNDIS_ALIGN(msgLen));

+	head->DataOffset = cpu_to_le32(36);

+	head->DataLength = cpu_to_le32(msgLen);

+

+	return head->MessageLength;	

+}
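+
+/*
+ * Reference note (comment added for clarity): rndis_add_head() prepends a
+ * REMOTE_NDIS_PACKET_MSG header to each frame.  In the standard layout the
+ * header is 44 bytes and DataOffset is counted from the DataOffset field
+ * itself (byte 8), so DataOffset = 36 places the payload immediately after
+ * the header; rndis_wrap() below copies each frame to
+ * dest + RNDIS_MSGHEAD_SIZE accordingly.
+ */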

+/* RNDIS aggregation (wrap) function */

+int rndis_wrap(struct usb_multi_packet * bind, struct usb_request *req)

+{

+	int index = 0;  	

+	int bindPacketNum = 0;

+	dma_addr_t	tempDmaAddr = req->dma;

+	unsigned int	tempBufAddr = (unsigned int)req->buf;

+	unsigned int	tempLen = 0;

+	unsigned long			flags;

+	struct usb_request *temp_req = NULL;

+	struct list_head *temp_list =  NULL;

+

+	req->length = 0;

+

+	spin_lock_irqsave(&bind->vnicPkt_spinLock, flags);

+	temp_list =  &bind->vincPkt_list;

+	

+	spin_unlock_irqrestore(&bind->vnicPkt_spinLock, flags);

+

+	bindPacketNum = atomic_read(&bind->vnicPkt_num);

+	bindPacketNum = MIN(bindPacketNum, bind->maxPacketNum);	

+	

+	if(bindPacketNum < 1)

+		return bindPacketNum;

+	

+	do

+	{	

+		spin_lock_irqsave(&bind->vnicPkt_spinLock, flags);

+		temp_list = temp_list->next;

+		

+		if(temp_list == 	&bind->vincPkt_list){

+			USBSTACK_DBG("bind get the head of list\n");

+			spin_unlock_irqrestore(&bind->vnicPkt_spinLock, flags);

+			return index;

+		}

+		

+		temp_req = container_of(temp_list, struct usb_request, list);

+		spin_unlock_irqrestore(&bind->vnicPkt_spinLock, flags);

+

+#if USE_DMA_TRANSFER

+

+		//USBSTACK_DBG("index: %d, list: 0x%x", index, &temp_req->list);

+		//USBSTACK_DBG("index: %d, src: 0x%x", index, temp_req->dma);

+		bind->dmaChannelCfg[index].link_addr = 1;

+		bind->dmaChannelCfg[index].src_addr = (unsigned int)temp_req->dma;

+		bind->dmaChannelCfg[index].dest_addr =(unsigned int)(tempDmaAddr + RNDIS_MSGHEAD_SIZE);

+#else

+		bind->dmaChannelCfg[index].src_addr = (unsigned int)temp_req->buf;

+		bind->dmaChannelCfg[index].dest_addr =(unsigned int)(tempBufAddr + RNDIS_MSGHEAD_SIZE);

+#endif

+		bind->dmaChannelCfg[index].count = (unsigned short)temp_req->length;

+		

+		tempLen = rndis_add_head(tempBufAddr, temp_req->length);

+		

+		tempDmaAddr += tempLen;

+		tempBufAddr += tempLen;

+		

+		req->length += tempLen;

+		

+		index++;			

+	}while(bindPacketNum -index);

+

+    return bindPacketNum;

+}

+

+

+

+/***************************************************************************
+ *			MBIM NIC protocol: packet aggregation / de-aggregation
+ ****************************************************************************/
+/* MBIM de-aggregation (unwrap) function */

+int mbim_unwrap(struct usb_multi_packet * unbind, struct usb_request *req)

+{

+    unsigned long flags;

+	unsigned char * tempBufAddr = req->buf;

+	unsigned char * tempDmaAddr = req->dma;

+	

+	struct usb_request *temp_req= NULL;

+

+	int index = 0;

+	struct list_head *temp_list =  &unbind->vincPkt_list;

+    int cur_first_datagram ;

+    int first_ndp_index ;

+    int next_ndp_index ;

+    int nfram ;

+    uint16_t datagram_len ;

+    uint16_t datagram_index ;

+    int j = 0 ;

+    int head_size = 0;

+

+#if MULTIPACKET_BUF_ALLOC

+	if((req->buf == NULL) || (req->dma == NULL))

+	{

+		USBSTACK_DBG("req buf or dma is null\n");

+		return 0;

+	}

+#endif

+	if(req->status !=0)

+	{

+		/* check whether the request completed successfully */

+		//USB_ASSERT(0, "REQ: 0x%x", req);

+		USBSTACK_DBG("req->status: %d", req->status);

+		return req->status;

+	}

+	

+	spin_lock_irqsave(&unbind->vnicPkt_spinLock, flags);

+	if(list_empty(&unbind->vincPkt_list)){

+		USBSTACK_DBG("%s unbind vincPkt list NULL", __func__);

+		temp_list = NULL;

+	}

+	spin_unlock_irqrestore(&unbind->vnicPkt_spinLock, flags);

+		

+	if(temp_list == NULL){

+		USBSTACK_DBG("mbim_unwrap vnic Pktlist is NULL");

+		return 0;	

+	}

+	/* packet header check */

+    first_ndp_index = mbim_get_first_ndp16_offset(req) ;

+    if(first_ndp_index <=0)

+    {

+        USBSTACK_DBG("[%s]:mbim ntb is not completed, first_ndp_index = %d\n",__func__,first_ndp_index ) ;

+        return first_ndp_index ;

+    }

+    //printk("[func]:%s, [line]:%d , first_ndp_index = %d \n",__func__,__LINE__,first_ndp_index ) ;

+	do

+	{

+#if MULTIPACKET_BUF_ALLOC

+		/* if the req buffer is NULL while unwrapping, bail out */

+		if(tempBufAddr == NULL){

+			USBSTACK_DBG("req is freel\n");

+			break;

+		}

+#endif

+		/* check whether it is greater than the amount read */

+

+		//printk("[func]:%s, [line]:%d , first_ndp_index = %d \n",__func__,__LINE__,first_ndp_index ) ;

+

+        /* get the next datagram group */

+        nfram = mbim_get_next_datagram_fragment(req, first_ndp_index, &cur_first_datagram ,&next_ndp_index) ;

+        if( nfram < 0 )

+        {

+            USBSTACK_DBG("[func]:%s ,[line]: %d , mbim ntb is not complete ,nfram = %d \n",__func__,__LINE__ ,nfram) ;

+            return index ;

+        }

+		//printk("[func]:%s ,[line]: %d , nfram = %d ,cur_first_datagram = %d ,next_ndp_index= %d \n",__func__,__LINE__ ,

+		// nfram ,cur_first_datagram ,next_ndp_index) ;

+        head_size = mbim_get_reverse_head_size() ;

+        for(j = 0 ;j< nfram;j++ )

+        {

+            /* DMA configuration */

+            if(index == unbind->maxPacketNum)

+            {

+                   //printk("[func]:%s ,[line]: %d , mbim ntb is too big  \n",__func__,__LINE__ ) ;

+                   return index;

+            }

+            int i = 0 ;

+            

+            datagram_index = get_unaligned_le16( req->buf+cur_first_datagram +j*4);

+            datagram_len = get_unaligned_le16( req->buf+cur_first_datagram + j*4+2);

+			//printk("[func]:%s ,[line]: %d , datagram_index= %d,datagram_len = %d \n",__func__,__LINE__,datagram_index,datagram_len) ;

+ 

+            if(datagram_index == 0 || datagram_len == 0)

+            {

+                USBSTACK_DBG("[func]:%s ,[line]: %d , datagram_index= %d,datagram_len = %d \n",__func__,__LINE__,datagram_index,datagram_len) ;

+                break ;

+

+            }

+			

+            if( req->buf + req->actual < req->buf + datagram_index +datagram_len )

+            {

+                USBSTACK_DBG("[func]:%s ,[line]: %d , datagram_index= %d,datagram_len = %d , req->actual = %d \n",

+                    __func__,__LINE__,datagram_index,datagram_len, req->actual) ;

+                return -EFAULT ;

+            }

+    		spin_lock_irqsave(&unbind->vnicPkt_spinLock, flags);

+    		if(list_empty(&unbind->vincPkt_list))

+            {

+    			USBSTACK_DBG("%s unbind vincPkt list NULL", __func__);

+    			temp_list = NULL;

+    		}

+    		else

+    		{

+    			temp_list = temp_list->next;

+            }

+	

+    		spin_unlock_irqrestore(&unbind->vnicPkt_spinLock, flags);

+    		

+    		if(temp_list == &unbind->vincPkt_list)

+            {

+                USBSTACK_DBG("unbind get the head of list\n");

+                return index;

+    		}

+

+    		if(temp_list == NULL)

+            {

+                USBSTACK_DBG("unbind is kill and unwrap exit");

+				return index;

+    		}

+            

+            unbind->dmaChannelCfg[index].count = (unsigned short)datagram_len ;

+    

+#if USE_DMA_TRANSFER

+            unbind->dmaChannelCfg[index].link_addr = 1;

+            unbind->dmaChannelCfg[index].src_addr = (unsigned int)(tempDmaAddr +datagram_index);

+#else

+            unbind->dmaChannelCfg[index].src_addr = (unsigned int)(tempBufAddr + datagram_index) ;

+#endif

+    

+        

+            temp_req = container_of(temp_list, struct usb_request, list);

+            

+#if USE_DMA_TRANSFER

+            unbind->dmaChannelCfg[index].dest_addr = (unsigned int)temp_req->dma + head_size;

+#else

+            unbind->dmaChannelCfg[index].dest_addr = (unsigned int)temp_req->buf + head_size;

+#endif

+    

+            temp_req->actual = (unsigned short)datagram_len;

+

+            //printk("[func]:%s ,[line]: %d , src_addr = 0x%x,dst_addr = 0x%x \n",__func__,__LINE__,

+			//	unbind->dmaChannelCfg[index].src_addr,unbind->dmaChannelCfg[index].dest_addr) ;

+ 

+            /* update the index */

+            index++ ;

+            

+        }

+     

+        if(next_ndp_index == 0)

+        {

+           break ;

+        }

+        first_ndp_index = next_ndp_index ; 

+

+	}while(unbind->maxPacketNum - index);

+		

+	return index;

+}

+

+

+/* MBIM aggregation (wrap) function */

+int mbim_wrap(struct usb_multi_packet * bind, struct usb_request *req)

+{

+	int index = 0;  	

+	int bindPacketNum = 0;

+	dma_addr_t	tempDmaAddr = req->dma;

+	unsigned int	tempBufAddr = (unsigned int)req->buf;

+	unsigned int	tempLen = 0;

+	unsigned long			flags;

+	struct usb_request *temp_req = NULL;

+	struct list_head *temp_list =  NULL;

+    int ncm_head_size = 0 ; 

+    uint16_t cur_index = 0 ;

+    struct mbim_ncm_info ncm_info ;

+	req->length = 0;

+	spin_lock_irqsave(&bind->vnicPkt_spinLock, flags);

+	temp_list =  &bind->vincPkt_list;

+	

+	spin_unlock_irqrestore(&bind->vnicPkt_spinLock, flags);

+

+	bindPacketNum = atomic_read(&bind->vnicPkt_num);

+	bindPacketNum = MIN(bindPacketNum, bind->maxPacketNum);	

+	if(bindPacketNum < 1)

+		return bindPacketNum;

+	/* initialize the header info */

+    ncm_head_size = mbim_get_nth16_and_ndp16_size();

+    mbim_ncm16_and_ndp16_init((char *)tempBufAddr, ncm_head_size+1) ;

+    tempBufAddr += ncm_head_size ;

+    req->length += ncm_head_size ;

+    tempDmaAddr += ncm_head_size ;

+    cur_index += ncm_head_size ;

+    

+	do

+	{	

+		spin_lock_irqsave(&bind->vnicPkt_spinLock, flags);

+		temp_list = temp_list->next;

+		

+		if(temp_list == 	&bind->vincPkt_list){

+			USBSTACK_DBG("bind get the head of list\n");

+			spin_unlock_irqrestore(&bind->vnicPkt_spinLock, flags);

+			return index;

+		}

+		

+		temp_req = container_of(temp_list, struct usb_request, list);

+		spin_unlock_irqrestore(&bind->vnicPkt_spinLock, flags);

+

+

+#if USE_DMA_TRANSFER

+

+		//USBSTACK_DBG("index: %d, list: 0x%x", index, &temp_req->list);

+		//USBSTACK_DBG("index: %d, src: 0x%x", index, temp_req->dma);

+		bind->dmaChannelCfg[index].link_addr = 1;

+		bind->dmaChannelCfg[index].src_addr = (unsigned int)temp_req->dma;

+		bind->dmaChannelCfg[index].dest_addr =(unsigned int)tempDmaAddr;

+#else

+		bind->dmaChannelCfg[index].src_addr = (unsigned int)temp_req->buf;

+		bind->dmaChannelCfg[index].dest_addr =(unsigned int)tempBufAddr;

+#endif

+		bind->dmaChannelCfg[index].count = (unsigned short)temp_req->length;

+		//printk("index = %d \n",index ,(unsigned long)tempDmaAddr , (unsigned long)(temp_req->dma + temp_req->real));

+		

+		/* update the header info */

+        ncm_info.ndp_datagram_off = cur_index ;

+        ncm_info.ndp_datagram_len =(unsigned short)temp_req->length ;

+        cur_index += (unsigned short)temp_req->length;

+        cur_index = ((cur_index+3)>>2)<<2 ; /* round up to 4-byte alignment */

+        ncm_info.nth_block_len = cur_index;

+        

+		if(mbim_fill_ncm16_vary_head_info(req->buf, &ncm_info)<0) 

+		{

+            USBSTACK_DBG("[%s]: fill ntb head info err\n",__func__) ;

+            return -EFAULT ;

+        }

+		tempDmaAddr = (unsigned int)req->dma + cur_index;

+		tempBufAddr = (unsigned int)req->buf + cur_index;

+		req->length = cur_index;

+    	//USBSTACK_DBG("[%s]: [line]:%d ,index=%d ....\n",__func__,__LINE__,index) ;			

+		index++;			

+	}while(bindPacketNum -index);

+

+    return bindPacketNum;

+}

+

+

+int virtual_network_wrap(struct usb_multi_packet * bind, struct usb_request *req)

+{

+    int ret = 0 ;

+    if(bind->wrap)

+    {

+        ret = bind->wrap(bind,  req) ;

+        if(ret > 0 && (req->length % bind->ep->maxpacket == 0) )

+        {

+            req->zero = 1 ;

+        }else

+        {

+            req->zero = 0 ;

+        }

+        return  ret ;

+    }

+

+    USBSTACK_DBG("[%s]:net type is unknown \n",__func__) ;

+    return -EINVAL ;

+

+

+

+}

+

+

+int virtual_network_unwrap(struct usb_multi_packet * unbind, struct usb_request *req)

+{

+

+    if(unbind->unwrap)

+    {

+        return unbind->unwrap(unbind,  req) ;

+    }

+    USBSTACK_DBG("[%s]:net type is unknown \n",__func__) ;

+    return  -EINVAL ;

+

+

+}

+

+

+void usbUnbind_ep_queue(struct usb_multi_packet *punbind)

+{

+	int		retval = -ENOMEM;

+	struct usb_request	*req= NULL;

+

+	req = punbind->reqNode[punbind->reqWrPos];

+

+#if MULTIPACKET_BUF_ALLOC

+	if(req == NULL){

+		usb_printk("usbUnbind_ep_queue, req is null\n");

+		return;

+	}

+#endif

+

+	punbind->reqWrPos = (punbind->reqWrPos+1)%punbind->reqNum;

+    atomic_inc(&punbind->usbBuft_num);

+    

+	//spin_unlock_irqrestore(&punbind->reqSpinLock, flags);

+	req->length = punbind->trans_buffer_size ;

+	//memset(req->buf, 0, req->length);

+	//USBSTACK_DBG("multi-rx len: %d", req->length);

+	retval = usb_ep_queue(punbind->ep, req, GFP_ATOMIC);

+	if(retval)

+	{

+		USBSTACK_DBG("rx err value: %d, req:0x%p", retval,req );

+	}

+}

+

+void usbUnbind_rx_complete(struct usb_ep *ep, struct usb_request *req)

+{

+	struct usb_multi_packet *punbind = &multiPacket.unbind;

+

+#if MULTIPACKET_BUF_ALLOC

+	if(punbind->buf_alloc_state == 0)

+	{

+		USBSTACK_DBG("usbUnbind_rx_complete,buf is free\n");

+		usb_printk("usbUnbind_rx_complete,buf is free\n");

+		return;

+	}

+#endif

+	if((multiPacket.active==0)&&(req->status < 0))

+	{

+		USBSTACK_DBG("Rx cmplt, req->status:%d", req->status);

+		return;

+	}

+	

+	//USBSTACK_DBG("Rx-cmpl len: %d", req->actual);

+	//if node ring is not full

+	if((punbind->reqWrPos+1)%punbind->reqNum != punbind->reqRdPos)

+	{

+		usbUnbind_ep_queue(punbind)	;

+	}

+	atomic_inc(&punbind->count);

+

+#ifndef PKT_UNBIND

+	wake_up(&punbind->wait);

+#else

+	if(req->actual <= USB_VIRTUAL_PACKET_MAXSIZE){

+		punbind->pkt_num++;

+	}

+

+	if((punbind->pkt_num >= 5) || (req->actual > USB_VIRTUAL_PACKET_MAXSIZE)){

+		wake_up(&punbind->wait);

+	}else if(punbind->pkt_num == 1){

+		mod_timer(&punbind->timer, (jiffies + USB_BIND_TIMER_EXPIRES));

+	}

+#endif

+}

+

+/* return the number of backlogged, unprocessed nodes in the receive buffer ring */

+int usbUnbind_num_backlog(struct usb_multi_packet *punbind)

+{

+#if 0

+	int 	RdPos;

+	int	WrPos;

+	unsigned long		flags;

+	spin_lock_irqsave(&punbind->reqSpinLock, flags);

+	RdPos = punbind->reqRdPos;

+	WrPos = punbind->reqWrPos;

+	spin_unlock_irqrestore(&punbind->reqSpinLock, flags);

+

+	USBSTACK_DBG("RdPos:%d, WrPos:%d", RdPos,WrPos);

+

+	if(RdPos<= WrPos)

+	{

+		return WrPos- RdPos;

+	}

+	else

+	{

+		return punbind->reqNum+ WrPos- RdPos;

+	}

+#else

+	int n = atomic_read(&punbind->count);

+	//USBSTACK_DBG("rxCount :%d", n);

+	return n;

+#endif

+

+}

+

+unsigned int eth_offline_num = 0;

+unsigned int packet_num = 0;

+int usbUnbind_thread(void *ptr)

+{

+	unsigned long			flags;

+	int		retval = -ENOMEM;

+	int wait_event_ret = 0;

+	struct usb_multi_packet *punbind = (struct usb_multi_packet *)ptr;

+	struct usb_request	*req = NULL;

+	struct usb_ep *ep = punbind->ep;

+	struct gether *port = container_of(ep, struct gether, out_ep);

+	struct eth_dev	*dev = container_of(port, struct eth_dev, port_usb);

+	//struct sched_param param = { .sched_priority = 1 };

+	//sched_setscheduler(current, SCHED_FIFO, &param);

+	USBSTACK_DBG("unbind thread entry!");

+

+	atomic_set(&punbind->count, 0);

+

+	struct sched_param sch_param = { .sched_priority = 1 };

+	sch_param.sched_priority = 37;

+	sched_setscheduler(current, SCHED_FIFO, &sch_param);

+	

+	while(!kthread_should_stop())

+	{

+		wait_event_ret = wait_event_interruptible(punbind->wait,  

+			usbUnbind_num_backlog(punbind)||kthread_should_stop());

+		

+		if(kthread_should_stop())

+		{

+			USBSTACK_DBG("unbind thread stop");

+			break;

+		}

+		

+		if(wait_event_ret < 0)

+			continue;

+		

+		atomic_dec(&punbind->count);

+		if(dev->eth_state== 0){

+			eth_offline_num++;

+			if(eth_offline_num == 1 || eth_offline_num%3000 == 0){

+				usb_printk("%s, %u portname:%s\n", __func__, __LINE__, port->func.name);

+				USBSTACK_DBG("%s, %u portname:%s\n", __func__, __LINE__, port->func.name);

+			}

+

+			return -ESHUTDOWN;

+		}

+		

+#if USE_ONLY_LIST

+		spin_lock_irqsave(&punbind->usbBuf_spinLock, flags);

+		req = container_of(punbind->usbBuf_list.next, struct usb_request, list);

+		list_del_init(&req->list);

+		spin_unlock_irqrestore(&punbind->usbBuf_spinLock, flags);

+        atomic_dec(&punbind->usbBuft_num);

+#else

+		spin_lock_irqsave(&punbind->reqSpinLock, flags);

+		req = punbind->reqNode[punbind->reqRdPos];

+		spin_unlock_irqrestore(&punbind->reqSpinLock, flags);		

+#endif

+#if MULTIPACKET_BUF_ALLOC

+		if(req == NULL){

+			usb_printk("%s, %u req is null\n", __func__, __LINE__);

+			continue;

+		}

+#endif

+		//printk("-----------unbind,read len:%d\r\n", req->actual);

+		punbind->numInTrans = virtual_network_unwrap(punbind, req);

+		if(punbind->numInTrans == 1){

+			packet_num++;

+		}else if(packet_num > 0){

+			packet_num = 0;

+			if(sch_param.sched_priority == 36){

+				sch_param.sched_priority = 37;

+				sched_setscheduler(current, SCHED_FIFO, &sch_param);

+			}

+		}

+			

+		if((packet_num > 5) && (sch_param.sched_priority == 37)){

+			sch_param.sched_priority = 36;

+			sched_setscheduler(current, SCHED_FIFO, &sch_param);

+		}

+		if((punbind->numInTrans <= 0)||(punbind->numInTrans>10))

+		{

+			USBSTACK_DBG(" punbind->numInTrans err : %d", punbind->numInTrans);

+		}

+		else

+		{

+			//USBSTACK_DBG("multi-rx num: %d", unbindPacketNum);

+			dma_Scatter_Trans(punbind, punbind->numInTrans);

+			

+			spin_lock_irqsave(&punbind->vnicPkt_spinLock, flags);

+			do

+			{

+			    if (list_empty(&punbind->vincPkt_list)){

+					USBSTACK_DBG("%s unbind vincPkt_list is NULL", __func__);

+					break;

+			    }

+				req = container_of(punbind->vincPkt_list.next, struct usb_request, list);

+				list_del_init(&req->list);

+				spin_unlock_irqrestore(&punbind->vnicPkt_spinLock, flags);

+

+				atomic_dec(&punbind->vnicPkt_num);

+

+#if USE_DMA_TRANSFER

+				usb_gadget_unmap_request(multiPacket.gadget, req, 0);

+#endif

+				//USBSTACK_DBG("Rx len: %d", req->actual);

+				if(multiPacket.active)

+				{

+					if(punbind->numInTrans == 1){

+						struct sk_buff	*skb = req->context;

+						skb_set_last_pkg(skb);

+					}

+					req->complete(punbind->ep, req);

+				}

+				else

+				{

+					dev_kfree_skb_any(req->context);	

+					usb_ep_free_request(punbind->ep, req);

+				}

+				spin_lock_irqsave(&punbind->vnicPkt_spinLock, flags);

+				

+			}while(--punbind->numInTrans);

+			spin_unlock_irqrestore(&punbind->vnicPkt_spinLock, flags);

+		}

+		

+		spin_lock_irqsave(&punbind->reqSpinLock, flags);

+		punbind->reqRdPos = (punbind->reqRdPos+1)%punbind->reqNum;

+		spin_unlock_irqrestore(&punbind->reqSpinLock, flags);

+        atomic_dec(&punbind->usbBuft_num);

+		

+		if(punbind->reqRdPos == punbind->reqWrPos)

+		{

+			//spin_unlock_irqrestore(&punbind->reqSpinLock, flags);		

+			usbUnbind_ep_queue(punbind);

+		}

+	}

+

+	clean_vnic_packet_list(punbind);

+

+	eth_offline_num = 0;

+

+	USBSTACK_DBG("unbind thread exit!");

+	return 0;

+}

+

+
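
+/*

+ * Aggregation timer callbacks: wake the bind/unbind thread when queued

+ * packets have waited too long, so a partially filled aggregate is flushed

+ * instead of being held back indefinitely.

+ */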

+void usbBind_timerCallBack(unsigned long data)

+{

+	struct usb_multi_packet *pbind = (struct usb_multi_packet *)data;

+	wake_up(&pbind->wait);

+	//USBSTACK_DBG("wake_up");

+	return;

+}

+

+#ifdef PKT_UNBIND

+void usbunBind_timerCallBack(unsigned long data)

+{

+	struct usb_multi_packet *punbind = (struct usb_multi_packet *)data;

+	wake_up(&punbind->wait);

+	//USBSTACK_DBG("wake_up");

+	return;

+}

+#endif

+
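
+/*

+ * TX completion for an aggregated IN transfer: return the USB buffer request

+ * to usbBuf_list, advance the ring read position and, if the list was empty,

+ * wake the bind thread that may be blocked waiting for a free buffer.

+ */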

+void usbBind_tx_complete(struct usb_ep *ep, struct usb_request *req)

+{

+	unsigned long			flags;

+	struct usb_multi_packet *pbind = &multiPacket.bind;

+

+#if MULTIPACKET_BUF_ALLOC

+	if(pbind->buf_alloc_state == 0){

+		USBSTACK_DBG("usbBind_tx_complete buf alloc state:%d\n", pbind->buf_alloc_state);

+		usb_printk("usbBind_tx_complete buf alloc state:%d\n", pbind->buf_alloc_state);

+		return;

+	}

+#endif

+	spin_lock_irqsave(&pbind->usbBuf_spinLock, flags);

+	req->zero = 0;

+	if(list_empty(&pbind->usbBuf_list))

+	{

+		list_add_tail(&req->list, &pbind->usbBuf_list);

+		pbind->reqRdPos = (pbind->reqRdPos+1)%pbind->reqNum;

+		spin_unlock_irqrestore(&pbind->usbBuf_spinLock, flags);

+		atomic_inc(&pbind->usbBuft_num);

+		atomic_set(&pbind->count, 1);

+		wake_up(&pbind->reqWait);

+	}

+	else

+	{

+		list_add_tail(&req->list, &pbind->usbBuf_list);

+		pbind->reqRdPos = (pbind->reqRdPos+1)%pbind->reqNum;

+		spin_unlock_irqrestore(&pbind->usbBuf_spinLock, flags);

+		atomic_inc(&pbind->usbBuft_num);

+	}

+

+	//USBSTACK_DBG("Tx-cmpl len: %d", req->actual);

+}

+

+
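
+/*

+ * Queue one aggregated transfer on the IN endpoint.  On success every

+ * per-packet VNIC request folded into it is completed; on failure the

+ * aggregation buffer is handed back through usbBind_tx_complete().

+ */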

+void usbBind_ep_queue(struct usb_multi_packet *pbind, struct usb_request *req)

+{

+	int	retval=0;

+	unsigned long			flags;

+	struct usb_ep *ep = pbind->ep;

+	struct gether *port = container_of(ep, struct gether, in_ep);

+	struct eth_dev	*dev = container_of(port, struct eth_dev, port_usb);

+

+	req->zero = ((req->length)%(pbind->ep->maxpacket)==0) ? 1 : 0;

+	//dsb();

+	retval = usb_ep_queue(pbind->ep, req, GFP_ATOMIC);

+	//USBSTACK_DBG("multi-tx len: %d", req->length);

+

+	if (retval==0) 

+	{

+		do

+		{

+			spin_lock_irqsave(&pbind->vnicPkt_spinLock, flags);

+			if (list_empty(&pbind->vincPkt_list))

+			{

+			    pbind->numInTrans = 0;

+				BUG_ON(atomic_read(&pbind->vnicPkt_num));

+				USBSTACK_DBG("%s bind vnicPkt list is NULL!", __func__);

+				spin_unlock_irqrestore(&pbind->vnicPkt_spinLock, flags);//11

+				break;

+			}

+			req = container_of(pbind->vincPkt_list.next, struct usb_request, list);

+			list_del_init(&req->list);

+			spin_unlock_irqrestore(&pbind->vnicPkt_spinLock, flags);

+

+			atomic_dec(&pbind->vnicPkt_num);

+			//USBSTACK_DBG("%d, release list:0x%x, dma: 0x%x", gbindPacketNum,&req->list,req->dma);

+			

+#if 0//USE_DMA_TRANSFER

+			if(psbuff_virt_to_phys(req->buf, &req->dma) == 0)

+			{

+				usb_gadget_unmap_request(multiPacket.gadget, req, 1);

+			}

+#endif

+			req->status = (multiPacket.active == 0) ? -ESHUTDOWN : 0;

+			req->complete(pbind->ep, req);

+			if(multiPacket.active == 0)

+			{

+				USBSTACK_DBG("bind is inactive!\n");

+			}

+		}while(--pbind->numInTrans);

+	}

+	else

+	{

+		if(multiPacket.active && dev->eth_state == 1)

+		{

+			USB_ASSERT(0, "tx fail value(%d)", retval);

+		}

+		else

+		{

+			/* The network link went down and the USB send failed.  Should the small-packet queue be flushed and its SKBs freed, or should the packets stay queued? */

+//			atomic_set(&pbind->vnicPkt_num, 0);	

+			USBSTACK_DBG("bind inactive and network is disconnected!");

+		}

+		/* return the buffer to the large-packet queue */

+		usbBind_tx_complete(pbind->ep, req);

+	}	

+}

+
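
+/*

+ * Aggregation ("bind") kernel thread: waits for packets queued by

+ * multi_packet_tx_queue(), takes a free USB buffer from usbBuf_list,

+ * wraps the pending VNIC packets into it and submits the aggregate on

+ * the IN endpoint through usbBind_ep_queue().

+ */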

+int usbBind_thread(void *ptr)

+{

+	unsigned long			flags;

+	struct usb_multi_packet *pbind = (struct usb_multi_packet *)ptr;

+	struct usb_request	*req = NULL;

+	struct usb_ep *ep = pbind->ep;

+	struct gether *port = container_of(ep, struct gether, in_ep);

+	struct eth_dev	*dev = container_of(port, struct eth_dev, port_usb);

+	int wait_event_ret = 0;

+	

+	atomic_set(&pbind->count, 1);

+

+	USBSTACK_DBG("bind thread entry!");

+	struct sched_param sch_param = { .sched_priority = 1 };

+	sch_param.sched_priority = 37;

+	sched_setscheduler(current, SCHED_FIFO, &sch_param);

+

+	

+

+	while(!kthread_should_stop())

+	{

+		wait_event_ret = wait_event_interruptible(pbind->wait, 

+			(atomic_read(&pbind->vnicPkt_num) && multiPacket.active == 1)||kthread_should_stop());

+

+		if(kthread_should_stop())

+		{

+			USBSTACK_DBG("bind thread stop-1");

+			break;

+		}

+		

+		if(wait_event_ret < 0)	//only deal stop signal, others will continue wait condition	

+			continue;

+		

+		if(dev->eth_state== 0){

+			eth_offline_num++;

+			if(eth_offline_num == 1 || eth_offline_num%3000 == 0){

+				usb_printk("%s, %u portname:%s\n", __func__, __LINE__, port->func.name);

+				USBSTACK_DBG("%s, %u portname:%s\n", __func__, __LINE__, port->func.name);

+			}

+

+			return -ESHUTDOWN;

+		}

+

+		/* get aggregation buffer space from the USB buffer list */

+		spin_lock_irqsave(&pbind->usbBuf_spinLock, flags);

+

+		if (list_empty(&pbind->usbBuf_list)) {

+			atomic_set(&pbind->count, 0);

+			spin_unlock_irqrestore(&pbind->usbBuf_spinLock, flags);

+			USBSTACK_DBG("bind-buf is empty");

+wait_reqwait_signal:			

+			wait_event_ret = wait_event_interruptible(pbind->reqWait, 

+				atomic_read(&pbind->count)||kthread_should_stop());

+

+			if(kthread_should_stop())

+			{

+				USBSTACK_DBG("bind thread stop-2");

+				break;

+			}

+			

+			if(wait_event_ret < 0)	//only deal stop signal, others will continue wait reqWait condition	

+				goto wait_reqwait_signal;

+

+			spin_lock_irqsave(&pbind->usbBuf_spinLock, flags);

+		}

+	

+		

+		req = container_of(pbind->usbBuf_list.next, struct usb_request, list);

+#if MULTIPACKET_BUF_ALLOC

+		if(req == NULL || multiPacket.active == 0){

+			usb_printk("%s, %u maybe req is null or mp active:%d\n", __func__, __LINE__, multiPacket.active);

+			spin_unlock_irqrestore(&pbind->usbBuf_spinLock, flags);

+			continue;

+		}

+#endif

+		list_del_init(&req->list);

+		pbind->reqWrPos = (pbind->reqWrPos+1)%pbind->reqNum;

+		spin_unlock_irqrestore(&pbind->usbBuf_spinLock, flags);

+        atomic_dec(&pbind->usbBuft_num);

+		

+		/* wrap the network packets per the NIC protocol and set up the aggregation parameters */

+		pbind->numInTrans = virtual_network_wrap(pbind, req);

+		//USBSTACK_DBG("multi-tx num: %d", bindPacketNum);

+		

+		if(pbind->numInTrans > 0){

+			/* per the aggregation parameters, DMA the data into place to complete the aggregate */

+			dma_Scatter_Trans(pbind, pbind->numInTrans);

+			//printk("--------bind tx,data len:%d!\n", req->length);

+

+			/* send the aggregated packet over USB */

+			usbBind_ep_queue(pbind, req);

+		}

+	}

+

+	/* clean up the VNIC list and buffers before the aggregation thread exits */

+	clean_vnic_packet_list(pbind);

+

+	eth_offline_num = 0;

+

+	USBSTACK_DBG("bind thread exit!");

+	return 0;

+}

+

+
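
+/*

+ * Request the memory-to-memory DMA channel (only on first use) and

+ * pre-configure one transfer descriptor per aggregated packet.

+ */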

+void usb_multi_pkt_dma_init(struct usb_multi_packet *multiPkt, int is_bind)

+{

+	int index = 0;

+	signed int ret = 0;

+	dma_channel_def *pDmaDef = NULL;

+	dma_cap_mask_t mask;

+	

+	//init_completion(&multiPkt->dmaCmplt);

+	sema_init(&multiPkt->dmaSem, 0);

+	

+	dma_cap_zero(mask);

+	dma_cap_set(DMA_SLAVE, mask);

+

+	if(multiPkt->dmaChannelAlloc == 0)

+	{

+		multiPkt->pdmaChannel = dma_request_channel(mask, zx29_dma_filter_fn,

+											(void*)DMA_CH_MEMORY);	

+		if(!multiPkt->pdmaChannel)

+		{

+			USBSTACK_DBG("request dma channel fail!!!!");

+			return;

+		}

+		multiPkt->dmaChannelAlloc =1;

+	}

+

+	pDmaDef = multiPkt->dmaChannelCfg;

+    printk("[func]:%s ,[line]:%d ,channel num= %d\n",__func__,__LINE__,multiPacket.maxPacketNum) ;

+

+	for(index=0;index < multiPacket.maxPacketNum;index++)

+	{	

+		pDmaDef[index].dma_control.tran_mode = TRAN_MEM_TO_MEM;

+		pDmaDef[index].dma_control.irq_mode =  DMA_ALL_IRQ_ENABLE;

+		pDmaDef[index].dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;

+		pDmaDef[index].dma_control.src_burst_len = DMA_BURST_LEN_16;

+		pDmaDef[index].dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;

+		pDmaDef[index].dma_control.dest_burst_len = DMA_BURST_LEN_16;

+	}

+}

+
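
+/*

+ * Per-direction initialisation of the request ring, lists and locks.  With

+ * MULTIPACKET_BUF_ALLOC the DMA buffers are allocated later by

+ * usb_mutli_pkt_buf_alloc(); otherwise they are mapped from the fixed

+ * USB_DMA_TX/RX_BUF_ADDR regions below.

+ */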

+#if MULTIPACKET_BUF_ALLOC

+int usb_multi_pkt_list_init(struct usb_multi_packet *multiPkt, int is_bind)

+{

+	/* init vnic packet list */

+	INIT_LIST_HEAD(&multiPkt->vincPkt_list);

+	spin_lock_init(&multiPkt->vnicPkt_spinLock);

+

+	atomic_set(&multiPkt->vnicPkt_num, 0);

+	

+	/* init usb buf list */

+	INIT_LIST_HEAD(&multiPkt->usbBuf_list);

+    atomic_set(&multiPkt->usbBuft_num, 0);

+	spin_lock_init(&multiPkt->usbBuf_spinLock);	

+

+	multiPkt->reqRdPos = 0;

+	multiPkt->reqWrPos = 0;

+	spin_lock_init(&multiPkt->reqSpinLock);	

+

+	return 0;

+}

+#else

+int usb_multi_pkt_buf_init(struct usb_multi_packet *multiPkt, int is_bind)

+{

+	int i = 0;

+	unsigned int usbBuf_num = 0;

+	struct usb_request	*req = NULL;

+

+    //mark....... 

+	int addr_offset = USB_VIRTUAL_PACKET_MAXSIZE*multi_packet_get_maxnum();

+

+	i = USB_DMA_BUF_SIZE/addr_offset;

+	USBSTACK_DBG("USB BUF SIZE %d!", i);

+	

+	/* init vnic packet list */

+	INIT_LIST_HEAD(&multiPkt->vincPkt_list);

+	spin_lock_init(&multiPkt->vnicPkt_spinLock);

+

+	atomic_set(&multiPkt->vnicPkt_num, 0);

+	

+	/* init usb buf list */

+	INIT_LIST_HEAD(&multiPkt->usbBuf_list);

+    atomic_set(&multiPkt->usbBuft_num, 0);

+	spin_lock_init(&multiPkt->usbBuf_spinLock);	

+

+	multiPkt->reqNum = i;

+	multiPkt->reqRdPos = 0;

+	multiPkt->reqWrPos = 0;

+	spin_lock_init(&multiPkt->reqSpinLock);	

+	while (i--) 

+	{

+		req = usb_ep_alloc_request(multiPkt->ep, GFP_ATOMIC);

+		if (!req){

+            USBSTACK_DBG("%s, bind:%d, Warning Alloc Request Fail!", __func__, is_bind);

+			return list_empty(&multiPkt->usbBuf_list) ? -ENOMEM : 0;

+		}

+

+		if(is_bind)

+		{

+			req->dma = USB_DMA_TX_BUF_ADDR + addr_offset*i;

+			req->complete = usbBind_tx_complete;

+		}

+		else

+		{

+			req->dma = USB_DMA_RX_BUF_ADDR + addr_offset*i;

+			req->complete = usbUnbind_rx_complete ;

+		}

+

+		req->buf = ioremap(req->dma, addr_offset);

+        BUG_ON(req->buf == NULL);

+		

+		list_add_tail(&req->list, &multiPkt->usbBuf_list);

+		usbBuf_num = atomic_inc_return(&multiPkt->usbBuft_num);

+		multiPkt->reqNode[multiPkt->reqNum - i-1] = req;

+	}

+    USBSTACK_DBG("%s, bind:%d, i:%d, usbBuf_num:%d", __func__, is_bind, i, usbBuf_num);

+	return 0;

+}

+

+#endif

+static int usb_multi_pkt_init(struct usb_multi_packet *multiPkt, int is_bind)

+{

+	init_waitqueue_head(&multiPkt->wait);

+	init_waitqueue_head(&multiPkt->reqWait);

+	

+	usb_multi_pkt_dma_init(multiPkt, is_bind);

+#if MULTIPACKET_BUF_ALLOC

+	usb_multi_pkt_list_init(multiPkt, is_bind);

+#else

+	usb_multi_pkt_buf_init(multiPkt, is_bind);

+#endif

+	return 0;

+}

+
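
+/*

+ * Release the per-direction requests.  In the MULTIPACKET_BUF_ALLOC build

+ * the coherent DMA region is shared by every request, so it is freed once

+ * through reqNode[0] before the individual usb_requests are released.

+ */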

+void usb_mutli_pkt_exit(struct usb_multi_packet *multiPkt, int is_bind)

+{

+	int index = multiPkt->reqNum;

+	struct usb_request	*req = NULL;

+#if MULTIPACKET_BUF_ALLOC

+	int addr_offset = multiPkt->trans_buffer_size ; //*multi_packet_get_maxnum();

+#endif

+	req = multiPkt->reqNode[0];

+	if(req){

+		if(req->buf){

+			if(is_bind)

+				dma_free_coherent(NULL, addr_offset*USB_BIND_DMA_BUF_NUM, req->buf, req->dma);

+			else

+				dma_free_coherent(NULL, addr_offset*USB_UNBIND_DMA_BUF_NUM, req->buf, req->dma);

+		}

+	}

+

+	while (index) 

+	{

+		req = multiPkt->reqNode[multiPkt->reqNum - index];

+		

+#if MULTIPACKET_BUF_ALLOC

+		if(req){

+				req->buf = NULL;

+				req->dma = 0;

+				list_del(&req->list);

+				multiPkt->reqNode[multiPkt->reqNum - index] = NULL;

+		}

+#else

+		iounmap(req->buf);

+		list_del(&req->list);

+#endif

+		

+        //iounmap(req->buf);

+		usb_ep_free_request(multiPkt->ep, req);

+

+		index--;

+	}

+}

+

+

+int multi_packet_get_maxnum(void)

+{

+	return multiPacket.maxPacketNum;

+}

+

+
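
+/*

+ * Track the USB link state.  On activation the receive path is re-queued

+ * via usbUnbind_ep_queue() whenever packet aggregation is enabled

+ * (maxPacketNum > 1).

+ */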

+void multi_packet_activate(void)

+{	

+	if(multiPacket.active == 0)

+	{

+		multiPacket.active = 1;

+

+		USBSTACK_DBG("USB link is active!");

+

+		if(multi_packet_get_maxnum() > 1)

+		{

+			usbUnbind_ep_queue(&multiPacket.unbind);

+		}

+	}

+}

+

+

+void multi_packet_deactivate(void)

+{

+    if (multiPacket.active == 1)

+    {

+        multiPacket.active = 0;

+        USBSTACK_DBG("USB link is inactive!");

+    }

+}

+

+#undef RNDIS_DL_MULTI_DESC

+

+/* io  for vnic to write packet */
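
+/*

+ * Without aggregation the request is queued directly on the IN endpoint.

+ * Otherwise it is appended to the bind VNIC list; the bind thread is woken

+ * when the aggregate is full, when a short (< 1400 byte) or last packet

+ * arrives, and a timer covers the first queued packet so a backlog is not

+ * held too long.  Packets are dropped once the list exceeds the limit from

+ * usb_get_rndis_list_max_flag().

+ */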

+int multi_packet_tx_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags)

+{

+	int	retval = 0;

+    unsigned long flags;

+	struct sk_buff *skb = (struct sk_buff *)req->context;

+	ep->is_netep = 1;

+	int bind_th_state = 0;

+	int tx_debug = 0;

+	if(multi_packet_get_maxnum() <=1 )

+	{

+		//req->dma = virt_to_phys(req->buf);

+		//retval = usb_gadget_map_request(multiPacket.gadget, req, 1);

+		//dma_sync_single_for_device();	

+		ep->protocol_type = 2;

+		retval = usb_ep_queue(ep, req, gfp_flags);

+	}

+	else

+	{

+#ifdef RNDIS_DL_MULTI_DESC

+		ep->protocol_type = 1;

+		retval = usb_ep_queue(ep, req, gfp_flags);

+#else

+		int num = 0;

+		int list_max = usb_get_rndis_list_max_flag();

+		struct usb_multi_packet *pbind = &multiPacket.bind;

+

+		ep->protocol_type = 1;

+

+		spin_lock_irqsave(&pbind->vnicPkt_spinLock, flags);

+#if 0

+		bind_th_state = atomic_read(&pbind->count);

+		if(bind_th_state == 0){			

+			spin_unlock_irqrestore(&pbind->vnicPkt_spinLock, flags);

+			//all usbBuf_list sent waiting for completed, so drop this skb

+			printk("bind usb_req exhausted, drops this skb\n");

+			tx_debug = get_panic_flag();

+			if(tx_debug & 0x4)

+				panic("bind usb_req exhausted\n");

+			return -ENOSPC;

+		}

+#endif		

+		if(list_max != 0){

+			num = atomic_read(&pbind->vnicPkt_num);

+			if(num > list_max){

+				spin_unlock_irqrestore(&pbind->vnicPkt_spinLock, flags);

+				wake_up(&pbind->wait);			

+				printk("bind vnicPkt backlog skb:%d, drop this packet\n", num);

+				return -ENOSPC;			

+			}

+		}		

+		list_add_tail(&req->list, &pbind->vincPkt_list);

+		spin_unlock_irqrestore(&pbind->vnicPkt_spinLock, flags);

+

+		num = atomic_inc_return(&pbind->vnicPkt_num);

+		if(num > multi_packet_get_maxnum()){

+			wake_up(&pbind->wait);	

+			return retval;				

+		}

+		num = num %multi_packet_get_maxnum();

+

+		//USBSTACK_DBG("add vnic pkt Tx len: %d, dma:0x%x", req->length, req->dma);

+		//USBSTACK_DBG("Tx num: %d", num);

+		if(skb_get_last_pkg(skb) == 1){

+			wake_up(&pbind->wait);

+			return retval;

+		}

+		

+		if((num == 0)||(req->length<1400)) 

+		{

+			/* trigger aggregation when the max packet count is reached or a short packet arrives */

+			wake_up(&pbind->wait);

+		}

+		else if(num == 1)

+		{

+			/* when the first packet is backlogged, start the timer so queued packets are not held too long */

+			mod_timer(&pbind->timer, (jiffies + USB_BIND_TIMER_EXPIRES));

+			//USBSTACK_DBG( "mod_timer: %u", jiffies + USB_BIND_TIMER_EXPIRES);

+		}

+#endif

+	}

+	return retval;

+}

+

+

+/* io  for vnic to read packet */
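
+/*

+ * With aggregation enabled the RX request is only added to the unbind VNIC

+ * list (and DMA-mapped when USE_DMA_TRANSFER is set); it is completed later

+ * by the unbind thread once an aggregated OUT transfer has been split up.

+ */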

+int multi_packet_rx_queue(struct usb_ep *ep, struct usb_request *req,  gfp_t gfp_flags)

+{

+	int	retval = 0;

+    unsigned long flags;

+

+	ep->is_netep = 1;

+	if(multi_packet_get_maxnum() <=1 )

+	{

+		///retval = usb_gadget_map_request(multiPacket.gadget, req, 0);

+		ep->protocol_type = 2;

+		retval = usb_ep_queue(ep, req, gfp_flags);

+	}

+	else

+	{

+		struct usb_multi_packet *punbind = &multiPacket.unbind;

+

+		ep->protocol_type = 1;

+		

+#if USE_DMA_TRANSFER

+		retval = usb_gadget_map_request(multiPacket.gadget, req, 0);

+		if(retval)

+		{

+			USBSTACK_DBG( "failed to map buffer");

+			return retval;

+		}

+#endif

+        spin_lock_irqsave(&punbind->vnicPkt_spinLock, flags);

+        list_add_tail(&req->list, &punbind->vincPkt_list);

+        spin_unlock_irqrestore(&punbind->vnicPkt_spinLock, flags);

+

+		atomic_inc(&punbind->vnicPkt_num);

+	}

+	return retval;

+}

+

+#if MULTIPACKET_BUF_ALLOC
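
+/*

+ * Allocate one coherent DMA region sized by the NIC type (RNDIS or MBIM)

+ * and carve it into per-request buffers that are parked on usbBuf_list.

+ */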

+int usb_mutli_pkt_buf_alloc(struct usb_multi_packet *multiPkt, int is_bind)

+{

+	void *dma_buf_vir;

+	dma_addr_t	dma_buf_phy;

+	unsigned long flags;

+	int i = 0;

+	unsigned int usbBuf_num = 0;

+	struct usb_request	*req = NULL;

+    int addr_offset = 0 ;

+    // determine the NIC type

+    if(multiPkt ==NULL)

+    {

+        printk("%s .line:%d ,para invalid \n",__func__,__LINE__) ;

+        return -1 ;

+    }

+    switch(multiPkt->net_type)

+    {

+        case 1:

+        {

+            //multiPkt->trans_buffer_size = USB_RNDIS_PKT_MAXSIZE;

+            addr_offset = USB_RNDIS_PKT_MAXSIZE*multi_packet_get_maxnum();

+            multiPkt->trans_buffer_size = addr_offset;

+            break;

+        }

+        case 2:

+        {

+            int siz = mbim_get_trans_buffer_size();

+            if(siz <= 0)

+            {

+                 printk("%s .line:%d ,siz invalid \n",__func__,__LINE__) ;

+                return -1 ;

+            }

+            printk("%s .line:%d ,siz =%d \n",__func__,__LINE__ ,siz) ;

+            siz = ((siz+3)>>2) << 2 ; // round up to 4-byte alignment

+            multiPkt->trans_buffer_size = siz; 

+            addr_offset = siz;

+            break;

+        }

+        default: 

+        {

+            printk("%s .line:%d ,para invalid , multiPkt->net_type = %d \n",__func__,__LINE__,multiPkt->net_type) ;

+            return -1 ;

+        }

+    }

+

+	//int addr_offset = USB_VIRTUAL_PACKET_MAXSIZE*multi_packet_get_maxnum(); // 4-byte aligned

+

+	if(is_bind)

+		i = USB_BIND_DMA_BUF_NUM;

+	else

+		i = USB_UNBIND_DMA_BUF_NUM;

+

+	dma_buf_vir = dma_alloc_coherent(NULL, addr_offset*i,  &dma_buf_phy, GFP_KERNEL);

+	if (!dma_buf_vir){

+		printk(KERN_INFO "usb_mutli_pkt_buf_alloc bind:%d error[%s][%d]\n",is_bind,__func__,__LINE__);

+		return -ENOMEM;

+	}

+	while (i--) 

+	{

+		req = usb_ep_alloc_request(multiPkt->ep, GFP_ATOMIC);

+		if (!req){

+            		usb_printk("%s, bind:%d, Warning Alloc Request Fail!", __func__, is_bind);

+			spin_lock_irqsave(&multiPkt->usbBuf_spinLock, flags);

+			while (!list_empty(&multiPkt->usbBuf_list))

+			{

+				req = container_of(multiPkt->usbBuf_list.next, struct usb_request, list);

+				list_del(&req->list);

+				spin_unlock_irqrestore(&multiPkt->usbBuf_spinLock, flags);

+				usb_ep_free_request(multiPkt->ep, req);

+				req = NULL;

+				spin_lock_irqsave(&multiPkt->usbBuf_spinLock, flags);

+			}

+			spin_unlock_irqrestore(&multiPkt->usbBuf_spinLock, flags);

+			atomic_set(&multiPkt->usbBuft_num, 0);

+			multiPkt->reqNum = 0;

+			if(is_bind)

+				dma_free_coherent(NULL, addr_offset*USB_BIND_DMA_BUF_NUM, dma_buf_vir, dma_buf_phy);

+			else

+				dma_free_coherent(NULL, addr_offset*USB_UNBIND_DMA_BUF_NUM, dma_buf_vir, dma_buf_phy);

+			return -ENOMEM;

+		}

+	

+		req->buf = dma_buf_vir + addr_offset*usbBuf_num;

+		req->dma = dma_buf_phy + addr_offset*usbBuf_num;

+

+		if(is_bind)

+			req->complete = usbBind_tx_complete;

+		else

+			req->complete = usbUnbind_rx_complete ;

+

+		spin_lock_irqsave(&multiPkt->usbBuf_spinLock, flags);	

+		list_add_tail(&req->list, &multiPkt->usbBuf_list);

+		multiPkt->reqNode[usbBuf_num] = req;

+		spin_unlock_irqrestore(&multiPkt->usbBuf_spinLock, flags);

+		usbBuf_num = atomic_inc_return(&multiPkt->usbBuft_num);

+	}

+    USBSTACK_DBG("%s, bind:%d, i:%d, usbBuf_num:%d", __func__, is_bind, i, usbBuf_num);

+	

+alloc_end:

+	if(usbBuf_num > 0){		

+		multiPkt->reqNum = usbBuf_num;

+		multiPkt->buf_alloc_state = 1;

+	}

+	return 0;

+}

+
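
+/*

+ * Undo usb_mutli_pkt_buf_alloc(): wait for any in-flight DMA, drop the

+ * queued requests and free the shared coherent region once via reqNode[0].

+ */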

+void usb_mutli_pkt_buf_free(struct usb_multi_packet *multiPkt, int is_bind)

+{

+	unsigned long flags;

+	int index = multiPkt->reqNum;

+	struct usb_request	*req = NULL;

+	int addr_offset = multiPkt->trans_buffer_size ;//*multi_packet_get_maxnum();

+

+	while(multiPkt->dma_running){

+		usb_printk("bind:%d dma is running, waiting for complete\n",is_bind);

+		msleep(10);

+	}

+

+//printk("usb_mutli_pkt_buf_free :%d\n",multiPkt->reqNum);

+	multiPkt->buf_alloc_state = 0;

+	if(!is_bind)

+		usb_ep_free_queue(multiPkt->ep);

+

+	atomic_set(&multiPkt->usbBuft_num, 0);

+	INIT_LIST_HEAD(&multiPkt->usbBuf_list);//???

+

+	req = multiPkt->reqNode[0];

+	if(req){

+		if(req->buf){

+			if(is_bind)

+				dma_free_coherent(NULL, addr_offset*USB_BIND_DMA_BUF_NUM, req->buf, req->dma);

+			else

+				dma_free_coherent(NULL, addr_offset*USB_UNBIND_DMA_BUF_NUM, req->buf, req->dma);

+		}

+	}

+

+	while (index) 

+	{

+		req = multiPkt->reqNode[multiPkt->reqNum - index];

+		if(req){

+			req->buf = NULL;

+			req->dma = 0;

+			spin_lock_irqsave(&multiPkt->usbBuf_spinLock, flags);	

+			multiPkt->reqNode[multiPkt->reqNum - index] = NULL;	

+			list_del(&req->list);

+			spin_unlock_irqrestore(&multiPkt->usbBuf_spinLock, flags);			

+			usb_ep_free_request(multiPkt->ep, req);

+			req = NULL;

+		}

+		index--;

+	}

+

+	multiPkt->reqNum = 0;	

+	multiPkt->reqRdPos = 0;

+	multiPkt->reqWrPos = 0;

+    multiPkt->trans_buffer_size = 0 ;

+}

+

+int multi_packet_buf_alloc(void)

+{

+	struct multi_packet *multiPkt = &multiPacket;

+	struct usb_multi_packet *pbind = &multiPacket.bind;

+	struct usb_multi_packet *punbind = &multiPacket.unbind;

+	int rtv;

+	

+	USBSTACK_DBG("multi_packet_buf_alloc!");

+	usb_printk("multi_packet_buf_alloc!\n");

+

+	if(multiPkt->maxPacketNum > 1){

+#ifndef RNDIS_DL_MULTI_DESC

+		rtv = usb_mutli_pkt_buf_alloc(pbind,1);

+		if(rtv < 0){

+			usb_printk("multi_packet_buf_alloc, pbind failed\n");

+			return -ENOMEM;

+		}

+#endif

+		rtv = usb_mutli_pkt_buf_alloc(punbind,0);

+		if(rtv < 0){

+			usb_mutli_pkt_buf_free(pbind,1);

+			usb_printk("multi_packet_buf_alloc, punbind failed\n");

+			return -ENOMEM;

+		}

+	}

+	return 0;

+}

+

+int multi_packet_buf_free(void)

+{

+	struct multi_packet *multiPkt = &multiPacket;

+	struct usb_multi_packet *pbind = &multiPacket.bind;

+	struct usb_multi_packet *punbind = &multiPacket.unbind;

+	

+	USBSTACK_DBG("multi_packet_buf_free!");

+	usb_printk("multi_packet_buf_free!\n");

+

+	if(multiPkt->maxPacketNum > 1){

+#ifndef RNDIS_DL_MULTI_DESC

+		usb_mutli_pkt_buf_free(pbind,1);

+#endif

+		usb_mutli_pkt_buf_free(punbind,0);

+	}

+	return 0;

+}

+#endif

+
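
+/*

+ * Set up packet aggregation for one gether port: allocate the reqNode and

+ * DMA-descriptor arrays from a single kzalloc() block, start the bind and

+ * unbind threads and select the RNDIS or MBIM wrap/unwrap handlers based

+ * on the function name.

+ */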

+int multi_packet_handle_init(struct gether *geth, struct usb_gadget *gadget)

+{

+	struct multi_packet *multiPkt = &multiPacket;

+	struct usb_multi_packet *pbind = &multiPacket.bind;

+	struct usb_multi_packet *punbind = &multiPacket.unbind;

+    multiPkt->maxPacketNum = get_vnic_multi_packet_num();

+#if 1

+

+    size_t alloc_req_size =   (multiPkt->maxPacketNum + PACKET_BUF_EXTRA_NUM) * sizeof(struct usb_request *) ;

+    size_t alloc_dma_channel_size   = (multiPkt->maxPacketNum+1) *sizeof(dma_channel_def)  ;

+    size_t total_mem_size = (alloc_req_size+alloc_dma_channel_size) *2 ;

+    

+

+    multiPacket.alloc_mem =  kzalloc(total_mem_size,  GFP_KERNEL) ;

+

+    if(multiPacket.alloc_mem == NULL)

+    {

+        printk("[func]:%s ,[line]:%d ,alloc_mem is null \n",__func__,__LINE__) ;

+        return -1 ;

+    }

+    pbind->reqNode = (struct usb_request **) multiPacket.alloc_mem  ;

+    punbind->reqNode = (struct usb_request **) (multiPacket.alloc_mem + alloc_req_size) ;

+    pbind->dmaChannelCfg = (dma_channel_def	*) (multiPacket.alloc_mem + alloc_req_size*2);

+    punbind->dmaChannelCfg = (dma_channel_def	*) (multiPacket.alloc_mem +  alloc_req_size*2 + alloc_dma_channel_size);

+    

+#endif

+	USBSTACK_DBG("multi-pkt init begin!");

+	

+	multiPkt->active = 0;

+	

+	multiPkt->gadget = gadget;

+	multiPkt->geth = geth;

+

+	if(multiPkt->maxPacketNum>1)

+	{

+		//unbind = &multiPkt->unbind;	

+		punbind->ep = geth->out_ep;	

+#ifdef PKT_UNBIND

+		setup_timer(&punbind->timer, usbunBind_timerCallBack, (unsigned long)punbind);

+#endif

+		usb_multi_pkt_init(punbind, 0);

+		punbind->thread = kthread_run(usbUnbind_thread, punbind, "usbunbind_thread");

+		BUG_ON(IS_ERR(punbind->thread));

+		//bind = &multiPkt->bind;	

+#ifndef RNDIS_DL_MULTI_DESC

+		pbind->ep =  geth->in_ep;

+		setup_timer(&pbind->timer, usbBind_timerCallBack, (unsigned long)pbind);

+		usb_multi_pkt_init(pbind, 1);

+		pbind->thread = kthread_run(usbBind_thread, pbind, "usbbind_thread");

+		BUG_ON(IS_ERR(pbind->thread));

+        if(strcmp(geth->func.name,RNDIS_NAME_STR)==0)

+        {

+            geth->wrap = NULL;

+

+        }

+#endif

+        pbind->maxPacketNum  = multiPkt->maxPacketNum ;

+        punbind->maxPacketNum  = multiPkt->maxPacketNum ;

+

+        if(strcmp(geth->func.name,RNDIS_NAME_STR) == 0)

+        {

+            geth->unwrap = NULL;

+        }

+

+        // determine the NIC type

+        printk("$$$$$$$geth->func.name = %s \n",geth->func.name) ;

+        if(strcmp(geth->func.name,RNDIS_NAME_STR) == 0)

+        {

+            pbind->wrap = rndis_wrap;

+            pbind->unwrap = rndis_unwrap;

+            punbind->wrap = rndis_wrap;

+            punbind->unwrap = rndis_unwrap;

+            pbind->net_type  = 1 ;

+            punbind->net_type  = 1 ;

+

+        }else if(strcmp(geth->func.name,MBIM_NAME_STR) == 0)

+        {

+            pbind->wrap = mbim_wrap;

+            pbind->unwrap = mbim_unwrap;

+            punbind->wrap = mbim_wrap;

+            punbind->unwrap = mbim_unwrap;

+            pbind->net_type  = 2 ;

+            punbind->net_type = 2 ;

+

+        } else

+        {

+            pbind->wrap = NULL;

+            pbind->unwrap = NULL;

+            punbind->wrap = NULL;

+            punbind->unwrap = NULL;

+            pbind->net_type  = 0 ;

+            punbind->net_type = 0 ;

+            printk("$$$$$$ net type unknown \n");

+        }

+        

+	}

+	return 0;

+}

+

+
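
+/*

+ * Tear down packet aggregation: stop both threads, release the per-request

+ * resources and free the block allocated in multi_packet_handle_init().

+ */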

+void multi_packet_handle_exit(void)

+{

+	struct usb_multi_packet *pbind = &multiPacket.bind;

+	struct usb_multi_packet *punbind = &multiPacket.unbind;

+

+

+	USBSTACK_DBG("multi-pkt exit begin!");

+

+	if(multiPacket.maxPacketNum>1)

+	{

+#ifndef RNDIS_DL_MULTI_DESC

+		USBSTACK_DBG("kill bind thread");

+		kthread_stop(pbind->thread);

+		usb_mutli_pkt_exit(pbind, 1);

+#endif

+

+		USBSTACK_DBG("kill unbind thread");

+		kthread_stop(punbind->thread);

+		usb_mutli_pkt_exit(punbind, 0);

+	}

+	multiPacket.maxPacketNum = 0;

+    if(multiPacket.alloc_mem != NULL)

+    {

+        kfree(multiPacket.alloc_mem);

+        multiPacket.alloc_mem = NULL ;

+    }

+

+    pbind->maxPacketNum = 0 ;

+    punbind->maxPacketNum = 0 ;

+    

+    pbind->wrap = NULL;

+    pbind->unwrap = NULL;

+    punbind->wrap = NULL;

+    punbind->unwrap = NULL;

+

+}

+

diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/multi_packet.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/multi_packet.h
new file mode 100644
index 0000000..c9ebd1d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/multi_packet.h
@@ -0,0 +1,31 @@
+

+

+#include <linux/usb/gadget.h>

+#include "u_ether.h"

+#ifndef __MULTI_PACKET_H

+#define __MULTI_PACKET_H

+#define PKT_UNBIND

+

+#define MULTIPACKET_BUF_ALLOC 1

+int multi_packet_get_maxnum(void);

+typedef void (*usb_complete_t)(struct usb_ep *ep,	struct usb_request *req);

+

+int multi_packet_tx_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags);

+

+int multi_packet_rx_queue(struct usb_ep *ep, struct usb_request *req,  gfp_t gfp_flags);

+

+int multi_packet_handle_init(struct gether *geth, struct usb_gadget *gadget);

+void multi_packet_handle_exit(void);

+void mbim_change_rx_complete(usb_complete_t __complete);

+void u_ether_tx_vnic_packet_list(void);

+

+#if MULTIPACKET_BUF_ALLOC

+int multi_packet_buf_alloc(void);

+int multi_packet_buf_free(void);

+#endif

+

+void multi_packet_activate(void);

+

+void multi_packet_deactivate(void);

+

+#endif

diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/mv_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/mv_udc.h
new file mode 100644
index 0000000..e2be951
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/mv_udc.h
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MV_UDC_H
+#define __MV_UDC_H
+
+#define VUSBHS_MAX_PORTS	8
+
+#define DQH_ALIGNMENT		2048
+#define DTD_ALIGNMENT		64
+#define DMA_BOUNDARY		4096
+
+#define EP_DIR_IN	1
+#define EP_DIR_OUT	0
+
+#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+#define EP0_MAX_PKT_SIZE	64
+/* ep0 transfer state */
+#define WAIT_FOR_SETUP		0
+#define DATA_STATE_XMIT		1
+#define DATA_STATE_NEED_ZLP	2
+#define WAIT_FOR_OUT_STATUS	3
+#define DATA_STATE_RECV		4
+
+#define CAPLENGTH_MASK		(0xff)
+#define DCCPARAMS_DEN_MASK	(0x1f)
+
+#define HCSPARAMS_PPC		(0x10)
+
+/* Frame Index Register Bit Masks */
+#define USB_FRINDEX_MASKS	0x3fff
+
+/* Command Register Bit Masks */
+#define USBCMD_RUN_STOP				(0x00000001)
+#define USBCMD_CTRL_RESET			(0x00000002)
+#define USBCMD_SETUP_TRIPWIRE_SET		(0x00002000)
+#define USBCMD_SETUP_TRIPWIRE_CLEAR		(~USBCMD_SETUP_TRIPWIRE_SET)
+
+#define USBCMD_ATDTW_TRIPWIRE_SET		(0x00004000)
+#define USBCMD_ATDTW_TRIPWIRE_CLEAR		(~USBCMD_ATDTW_TRIPWIRE_SET)
+
+/* bit 15,3,2 are for frame list size */
+#define USBCMD_FRAME_SIZE_1024			(0x00000000) /* 000 */
+#define USBCMD_FRAME_SIZE_512			(0x00000004) /* 001 */
+#define USBCMD_FRAME_SIZE_256			(0x00000008) /* 010 */
+#define USBCMD_FRAME_SIZE_128			(0x0000000C) /* 011 */
+#define USBCMD_FRAME_SIZE_64			(0x00008000) /* 100 */
+#define USBCMD_FRAME_SIZE_32			(0x00008004) /* 101 */
+#define USBCMD_FRAME_SIZE_16			(0x00008008) /* 110 */
+#define USBCMD_FRAME_SIZE_8			(0x0000800C) /* 111 */
+
+#define EPCTRL_TX_ALL_MASK			(0xFFFF0000)
+#define EPCTRL_RX_ALL_MASK			(0x0000FFFF)
+
+#define EPCTRL_TX_DATA_TOGGLE_RST		(0x00400000)
+#define EPCTRL_TX_EP_STALL			(0x00010000)
+#define EPCTRL_RX_EP_STALL			(0x00000001)
+#define EPCTRL_RX_DATA_TOGGLE_RST		(0x00000040)
+#define EPCTRL_RX_ENABLE			(0x00000080)
+#define EPCTRL_TX_ENABLE			(0x00800000)
+#define EPCTRL_CONTROL				(0x00000000)
+#define EPCTRL_ISOCHRONOUS			(0x00040000)
+#define EPCTRL_BULK				(0x00080000)
+#define EPCTRL_INT				(0x000C0000)
+#define EPCTRL_TX_TYPE				(0x000C0000)
+#define EPCTRL_RX_TYPE				(0x0000000C)
+#define EPCTRL_DATA_TOGGLE_INHIBIT		(0x00000020)
+#define EPCTRL_TX_EP_TYPE_SHIFT			(18)
+#define EPCTRL_RX_EP_TYPE_SHIFT			(2)
+
+#define EPCOMPLETE_MAX_ENDPOINTS		(16)
+
+/* endpoint list address bit masks */
+#define USB_EP_LIST_ADDRESS_MASK              0xfffff800
+
+#define PORTSCX_W1C_BITS			0x2a
+#define PORTSCX_PORT_RESET			0x00000100
+#define PORTSCX_PORT_POWER			0x00001000
+#define PORTSCX_FORCE_FULL_SPEED_CONNECT	0x01000000
+#define PORTSCX_PAR_XCVR_SELECT			0xC0000000
+#define PORTSCX_PORT_FORCE_RESUME		0x00000040
+#define PORTSCX_PORT_SUSPEND			0x00000080
+#define PORTSCX_PORT_SPEED_FULL			0x00000000
+#define PORTSCX_PORT_SPEED_LOW			0x04000000
+#define PORTSCX_PORT_SPEED_HIGH			0x08000000
+#define PORTSCX_PORT_SPEED_MASK			0x0C000000
+
+/* USB MODE Register Bit Masks */
+#define USBMODE_CTRL_MODE_IDLE			0x00000000
+#define USBMODE_CTRL_MODE_DEVICE		0x00000002
+#define USBMODE_CTRL_MODE_HOST			0x00000003
+#define USBMODE_CTRL_MODE_RSV			0x00000001
+#define USBMODE_SETUP_LOCK_OFF			0x00000008
+#define USBMODE_STREAM_DISABLE			0x00000010
+
+/* USB STS Register Bit Masks */
+#define USBSTS_INT			0x00000001
+#define USBSTS_ERR			0x00000002
+#define USBSTS_PORT_CHANGE		0x00000004
+#define USBSTS_FRM_LST_ROLL		0x00000008
+#define USBSTS_SYS_ERR			0x00000010
+#define USBSTS_IAA			0x00000020
+#define USBSTS_RESET			0x00000040
+#define USBSTS_SOF			0x00000080
+#define USBSTS_SUSPEND			0x00000100
+#define USBSTS_HC_HALTED		0x00001000
+#define USBSTS_RCL			0x00002000
+#define USBSTS_PERIODIC_SCHEDULE	0x00004000
+#define USBSTS_ASYNC_SCHEDULE		0x00008000
+
+
+/* Interrupt Enable Register Bit Masks */
+#define USBINTR_INT_EN                          (0x00000001)
+#define USBINTR_ERR_INT_EN                      (0x00000002)
+#define USBINTR_PORT_CHANGE_DETECT_EN           (0x00000004)
+
+#define USBINTR_ASYNC_ADV_AAE                   (0x00000020)
+#define USBINTR_ASYNC_ADV_AAE_ENABLE            (0x00000020)
+#define USBINTR_ASYNC_ADV_AAE_DISABLE           (0xFFFFFFDF)
+
+#define USBINTR_RESET_EN                        (0x00000040)
+#define USBINTR_SOF_UFRAME_EN                   (0x00000080)
+#define USBINTR_DEVICE_SUSPEND                  (0x00000100)
+
+#define USB_DEVICE_ADDRESS_MASK			(0xfe000000)
+#define USB_DEVICE_ADDRESS_BIT_SHIFT		(25)
+
+struct mv_cap_regs {
+	u32	caplength_hciversion;
+	u32	hcsparams;	/* HC structural parameters */
+	u32	hccparams;	/* HC Capability Parameters*/
+	u32	reserved[5];
+	u32	dciversion;	/* DC version number and reserved 16 bits */
+	u32	dccparams;	/* DC Capability Parameters */
+};
+
+struct mv_op_regs {
+	u32	usbcmd;		/* Command register */
+	u32	usbsts;		/* Status register */
+	u32	usbintr;	/* Interrupt enable */
+	u32	frindex;	/* Frame index */
+	u32	reserved1[1];
+	u32	deviceaddr;	/* Device Address */
+	u32	eplistaddr;	/* Endpoint List Address */
+	u32	ttctrl;		/* HOST TT status and control */
+	u32	burstsize;	/* Programmable Burst Size */
+	u32	txfilltuning;	/* Host Transmit Pre-Buffer Packet Tuning */
+	u32	reserved[4];
+	u32	epnak;		/* Endpoint NAK */
+	u32	epnaken;	/* Endpoint NAK Enable */
+	u32	configflag;	/* Configured Flag register */
+	u32	portsc[VUSBHS_MAX_PORTS]; /* Port Status/Control x, x = 1..8 */
+	u32	otgsc;
+	u32	usbmode;	/* USB Host/Device mode */
+	u32	epsetupstat;	/* Endpoint Setup Status */
+	u32	epprime;	/* Endpoint Initialize */
+	u32	epflush;	/* Endpoint De-initialize */
+	u32	epstatus;	/* Endpoint Status */
+	u32	epcomplete;	/* Endpoint Interrupt On Complete */
+	u32	epctrlx[16];	/* Endpoint Control, where x = 0.. 15 */
+	u32	mcr;		/* Mux Control */
+	u32	isr;		/* Interrupt Status */
+	u32	ier;		/* Interrupt Enable */
+};
+
+struct mv_udc {
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+	spinlock_t			lock;
+	struct completion		*done;
+	struct platform_device		*dev;
+	int				irq;
+
+	struct mv_cap_regs __iomem	*cap_regs;
+	struct mv_op_regs __iomem	*op_regs;
+	void __iomem                    *phy_regs;
+	unsigned int			max_eps;
+	struct mv_dqh			*ep_dqh;
+	size_t				ep_dqh_size;
+	dma_addr_t			ep_dqh_dma;
+
+	struct dma_pool			*dtd_pool;
+	struct mv_ep			*eps;
+
+	struct mv_dtd			*dtd_head;
+	struct mv_dtd			*dtd_tail;
+	unsigned int			dtd_entries;
+
+	struct mv_req			*status_req;
+	struct usb_ctrlrequest		local_setup_buff;
+
+	unsigned int		resume_state;	/* USB state to resume */
+	unsigned int		usb_state;	/* USB current state */
+	unsigned int		ep0_state;	/* Endpoint zero state */
+	unsigned int		ep0_dir;
+
+	unsigned int		dev_addr;
+	unsigned int		test_mode;
+
+	int			errors;
+	unsigned		softconnect:1,
+				vbus_active:1,
+				remote_wakeup:1,
+				softconnected:1,
+				force_fs:1,
+				clock_gating:1,
+				active:1,
+				stopped:1;      /* stop bit is set */
+
+	struct work_struct	vbus_work;
+	struct workqueue_struct *qwork;
+
+	struct usb_phy		*transceiver;
+
+	struct mv_usb_platform_data     *pdata;
+
+	/* some SoCs have multiple clock sources for USB */
+	unsigned int    clknum;
+	struct clk      *clk[0];
+};
+
+/* endpoint data structure */
+struct mv_ep {
+	struct usb_ep		ep;
+	struct mv_udc		*udc;
+	struct list_head	queue;
+	struct mv_dqh		*dqh;
+	const struct usb_endpoint_descriptor	*desc;
+	u32			direction;
+	char			name[14];
+	unsigned		stopped:1,
+				wedge:1,
+				ep_type:2,
+				ep_num:8;
+};
+
+/* request data structure */
+struct mv_req {
+	struct usb_request	req;
+	struct mv_dtd		*dtd, *head, *tail;
+	struct mv_ep		*ep;
+	struct list_head	queue;
+	unsigned int            test_mode;
+	unsigned		dtd_count;
+	unsigned		mapped:1;
+};
+
+#define EP_QUEUE_HEAD_MULT_POS			30
+#define EP_QUEUE_HEAD_ZLT_SEL			0x20000000
+#define EP_QUEUE_HEAD_MAX_PKT_LEN_POS		16
+#define EP_QUEUE_HEAD_MAX_PKT_LEN(ep_info)	(((ep_info)>>16)&0x07ff)
+#define EP_QUEUE_HEAD_IOS			0x00008000
+#define EP_QUEUE_HEAD_NEXT_TERMINATE		0x00000001
+#define EP_QUEUE_HEAD_IOC			0x00008000
+#define EP_QUEUE_HEAD_MULTO			0x00000C00
+#define EP_QUEUE_HEAD_STATUS_HALT		0x00000040
+#define EP_QUEUE_HEAD_STATUS_ACTIVE		0x00000080
+#define EP_QUEUE_CURRENT_OFFSET_MASK		0x00000FFF
+#define EP_QUEUE_HEAD_NEXT_POINTER_MASK		0xFFFFFFE0
+#define EP_QUEUE_FRINDEX_MASK			0x000007FF
+#define EP_MAX_LENGTH_TRANSFER			0x4000
+
+struct mv_dqh {
+	/* Bits 16..26 Bit 15 is Interrupt On Setup */
+	u32	max_packet_length;
+	u32	curr_dtd_ptr;		/* Current dTD Pointer */
+	u32	next_dtd_ptr;		/* Next dTD Pointer */
+	/* Total bytes (16..30), IOC (15), INT (8), STS (0-7) */
+	u32	size_ioc_int_sts;
+	u32	buff_ptr0;		/* Buffer pointer Page 0 (12-31) */
+	u32	buff_ptr1;		/* Buffer pointer Page 1 (12-31) */
+	u32	buff_ptr2;		/* Buffer pointer Page 2 (12-31) */
+	u32	buff_ptr3;		/* Buffer pointer Page 3 (12-31) */
+	u32	buff_ptr4;		/* Buffer pointer Page 4 (12-31) */
+	u32	reserved1;
+	/* 8 bytes of setup data that follows the Setup PID */
+	u8	setup_buffer[8];
+	u32	reserved2[4];
+};
+
+
+#define DTD_NEXT_TERMINATE		(0x00000001)
+#define DTD_IOC				(0x00008000)
+#define DTD_STATUS_ACTIVE		(0x00000080)
+#define DTD_STATUS_HALTED		(0x00000040)
+#define DTD_STATUS_DATA_BUFF_ERR	(0x00000020)
+#define DTD_STATUS_TRANSACTION_ERR	(0x00000008)
+#define DTD_RESERVED_FIELDS		(0x00007F00)
+#define DTD_ERROR_MASK			(0x68)
+#define DTD_ADDR_MASK			(0xFFFFFFE0)
+#define DTD_PACKET_SIZE			0x7FFF0000
+#define DTD_LENGTH_BIT_POS		(16)
+
+struct mv_dtd {
+	u32	dtd_next;
+	u32	size_ioc_sts;
+	u32	buff_ptr0;		/* Buffer pointer Page 0 */
+	u32	buff_ptr1;		/* Buffer pointer Page 1 */
+	u32	buff_ptr2;		/* Buffer pointer Page 2 */
+	u32	buff_ptr3;		/* Buffer pointer Page 3 */
+	u32	buff_ptr4;		/* Buffer pointer Page 4 */
+	u32	scratch_ptr;
+	/* 32 bytes */
+	dma_addr_t td_dma;		/* dma address for this td */
+	struct mv_dtd *next_dtd_virt;
+};
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/mv_udc_core.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/mv_udc_core.c
new file mode 100644
index 0000000..a73cf40
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/mv_udc_core.c
@@ -0,0 +1,2494 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Author: Chao Xie <chao.xie@marvell.com>
+ *	   Neil Zhang <zhangwm@marvell.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/platform_data/mv_usb.h>
+#include <asm/unaligned.h>
+
+#include "mv_udc.h"
+
+#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
+#define DRIVER_VERSION		"8 Nov 2010"
+
+#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
+				((ep)->udc->ep0_dir) : ((ep)->direction))
+
+/* timeout value -- usec */
+#define RESET_TIMEOUT		10000
+#define FLUSH_TIMEOUT		10000
+#define EPSTATUS_TIMEOUT	10000
+#define PRIME_TIMEOUT		10000
+#define READSAFE_TIMEOUT	1000
+#define DTD_TIMEOUT		1000
+
+#define LOOPS_USEC_SHIFT	4
+#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
+#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)
+
+static DECLARE_COMPLETION(release_done);
+
+static const char driver_name[] = "mv_udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+/* controller device global variable */
+static struct mv_udc	*the_controller;
+int mv_usb_otgsc;
+
+static void nuke(struct mv_ep *ep, int status);
+static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
+
+/* for endpoint 0 operations */
+static const struct usb_endpoint_descriptor mv_ep0_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	0,
+	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
+};
+
+static void ep0_reset(struct mv_udc *udc)
+{
+	struct mv_ep *ep;
+	u32 epctrlx;
+	int i = 0;
+
+	/* ep0 in and out */
+	for (i = 0; i < 2; i++) {
+		ep = &udc->eps[i];
+		ep->udc = udc;
+
+		/* ep0 dQH */
+		ep->dqh = &udc->ep_dqh[i];
+
+		/* configure ep0 endpoint capabilities in dQH */
+		ep->dqh->max_packet_length =
+			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+			| EP_QUEUE_HEAD_IOS;
+
+		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
+
+		epctrlx = readl(&udc->op_regs->epctrlx[0]);
+		if (i) {	/* TX */
+			epctrlx |= EPCTRL_TX_ENABLE
+				| (USB_ENDPOINT_XFER_CONTROL
+					<< EPCTRL_TX_EP_TYPE_SHIFT);
+
+		} else {	/* RX */
+			epctrlx |= EPCTRL_RX_ENABLE
+				| (USB_ENDPOINT_XFER_CONTROL
+					<< EPCTRL_RX_EP_TYPE_SHIFT);
+		}
+
+		writel(epctrlx, &udc->op_regs->epctrlx[0]);
+	}
+}
+
+/* protocol ep0 stall, will automatically be cleared on new transaction */
+static void ep0_stall(struct mv_udc *udc)
+{
+	u32	epctrlx;
+
+	/* set TX and RX to stall */
+	epctrlx = readl(&udc->op_regs->epctrlx[0]);
+	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
+	writel(epctrlx, &udc->op_regs->epctrlx[0]);
+
+	/* update ep0 state */
+	udc->ep0_state = WAIT_FOR_SETUP;
+	udc->ep0_dir = EP_DIR_OUT;
+}
+
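+/*
+ * Walk a request's dTD list after its completion interrupt: collect error
+ * status, compute the actual transfer length and wait for the dQH to move
+ * past the current dTD.
+ */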
+static int process_ep_req(struct mv_udc *udc, int index,
+	struct mv_req *curr_req)
+{
+	struct mv_dtd	*curr_dtd;
+	struct mv_dqh	*curr_dqh;
+	int td_complete, actual, remaining_length;
+	int i, direction;
+	int retval = 0;
+	u32 errors;
+	u32 bit_pos;
+
+	curr_dqh = &udc->ep_dqh[index];
+	direction = index % 2;
+
+	curr_dtd = curr_req->head;
+	td_complete = 0;
+	actual = curr_req->req.length;
+
+	for (i = 0; i < curr_req->dtd_count; i++) {
+		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
+			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
+				udc->eps[index].name);
+			return 1;
+		}
+
+		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
+		if (!errors) {
+			remaining_length =
+				(curr_dtd->size_ioc_sts	& DTD_PACKET_SIZE)
+					>> DTD_LENGTH_BIT_POS;
+			actual -= remaining_length;
+
+			if (remaining_length) {
+				if (direction) {
+					dev_dbg(&udc->dev->dev,
+						"TX dTD remains data\n");
+					retval = -EPROTO;
+					break;
+				} else
+					break;
+			}
+		} else {
+			dev_info(&udc->dev->dev,
+				"complete_tr error: ep=%d %s: error = 0x%x\n",
+				index >> 1, direction ? "SEND" : "RECV",
+				errors);
+			if (errors & DTD_STATUS_HALTED) {
+				/* Clear the errors and Halt condition */
+				curr_dqh->size_ioc_int_sts &= ~errors;
+				retval = -EPIPE;
+			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
+				retval = -EPROTO;
+			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
+				retval = -EILSEQ;
+			}
+		}
+		if (i != curr_req->dtd_count - 1)
+			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
+	}
+	if (retval)
+		return retval;
+
+	if (direction == EP_DIR_OUT)
+		bit_pos = 1 << curr_req->ep->ep_num;
+	else
+		bit_pos = 1 << (16 + curr_req->ep->ep_num);
+
+	while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
+		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
+			while (readl(&udc->op_regs->epstatus) & bit_pos)
+				udelay(1);
+			break;
+		}
+		udelay(1);
+	}
+
+	curr_req->req.actual = actual;
+
+	return 0;
+}
+
+/*
+ * done() - retire a request; caller blocked irqs
+ * @status : request status to be set, only works when
+ * request is still in progress.
+ */
+static void done(struct mv_ep *ep, struct mv_req *req, int status)
+{
+	struct mv_udc *udc = NULL;
+	unsigned char stopped = ep->stopped;
+	struct mv_dtd *curr_td, *next_td;
+	int j;
+
+	udc = (struct mv_udc *)ep->udc;
+	/* Remove the req from the endpoint queue */
+	list_del_init(&req->queue);
+
+	/* req.status should be set as -EINPROGRESS in ep_queue() */
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	/* Free dtd for the request */
+	next_td = req->head;
+	for (j = 0; j < req->dtd_count; j++) {
+		curr_td = next_td;
+		if (j != req->dtd_count - 1)
+			next_td = curr_td->next_dtd_virt;
+		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
+	}
+
+	if (req->mapped) {
+		dma_unmap_single(ep->udc->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			((ep_dir(ep) == EP_DIR_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE));
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+	} else
+		dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			((ep_dir(ep) == EP_DIR_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE));
+
+	if (status && (status != -ESHUTDOWN))
+		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	ep->stopped = 1;
+
+	spin_unlock(&ep->udc->lock);
+	/*
+	 * complete() is from gadget layer,
+	 * eg fsg->bulk_in_complete()
+	 */
+	if (req->req.complete)
+		req->req.complete(&ep->ep, &req->req);
+
+	spin_lock(&ep->udc->lock);
+	ep->stopped = stopped;
+}
+
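+/*
+ * Append the request's dTD chain to the endpoint queue.  When the queue is
+ * not empty the ATDTW tripwire is used to link safely behind the last
+ * request; otherwise the dQH is rewritten and the endpoint primed directly.
+ */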
+static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
+{
+	struct mv_udc *udc;
+	struct mv_dqh *dqh;
+	u32 bit_pos, direction;
+	u32 usbcmd, epstatus;
+	unsigned int loops;
+	int retval = 0;
+
+	udc = ep->udc;
+	direction = ep_dir(ep);
+	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
+	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
+
+	/* check if the pipe is empty */
+	if (!(list_empty(&ep->queue))) {
+		struct mv_req *lastreq;
+		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
+		lastreq->tail->dtd_next =
+			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+
+		wmb();
+
+		if (readl(&udc->op_regs->epprime) & bit_pos)
+			goto done;
+
+		loops = LOOPS(READSAFE_TIMEOUT);
+		while (1) {
+			/* start with setting the semaphores */
+			usbcmd = readl(&udc->op_regs->usbcmd);
+			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
+			writel(usbcmd, &udc->op_regs->usbcmd);
+
+			/* read the endpoint status */
+			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
+
+			/*
+			 * Reread the ATDTW semaphore bit to check if it is
+			 * cleared. When the hardware sees a hazard, it will clear
+			 * the bit; otherwise it remains set to 1 and we can
+			 * proceed with priming of endpoint if not already
+			 * primed.
+			 */
+			if (readl(&udc->op_regs->usbcmd)
+				& USBCMD_ATDTW_TRIPWIRE_SET)
+				break;
+
+			loops--;
+			if (loops == 0) {
+				dev_err(&udc->dev->dev,
+					"Timeout for ATDTW_TRIPWIRE...\n");
+				retval = -ETIME;
+				goto done;
+			}
+			udelay(LOOPS_USEC);
+		}
+
+		/* Clear the semaphore */
+		usbcmd = readl(&udc->op_regs->usbcmd);
+		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
+		writel(usbcmd, &udc->op_regs->usbcmd);
+
+		if (epstatus)
+			goto done;
+	}
+
+	/* Write dQH next pointer and terminate bit to 0 */
+	dqh->next_dtd_ptr = req->head->td_dma
+				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+
+	/* clear active and halt bit, in case set from a previous error */
+	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
+
+	/* Ensure that updates to the QH will occur before priming. */
+	wmb();
+
+	/* Prime the Endpoint */
+	writel(bit_pos, &udc->op_regs->epprime);
+
+done:
+	return retval;
+}
+
+
+static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
+		dma_addr_t *dma, int *is_last)
+{
+	u32 temp;
+	struct mv_dtd *dtd;
+	struct mv_udc *udc;
+
+	/* how big will this transfer be? */
+	*length = min(req->req.length - req->req.actual,
+			(unsigned)EP_MAX_LENGTH_TRANSFER);
+
+	udc = req->ep->udc;
+
+	/*
+	 * Be careful that no _GFP_HIGHMEM is set,
+	 * or we can not use dma_to_virt
+	 */
+	dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
+	if (dtd == NULL)
+		return dtd;
+
+	dtd->td_dma = *dma;
+	/* initialize buffer page pointers */
+	temp = (u32)(req->req.dma + req->req.actual);
+	dtd->buff_ptr0 = cpu_to_le32(temp);
+	temp &= ~0xFFF;
+	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
+	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
+	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
+	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
+
+	req->req.actual += *length;
+
+	/* zlp is needed if req->req.zero is set */
+	if (req->req.zero) {
+		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+			*is_last = 1;
+		else
+			*is_last = 0;
+	} else if (req->req.length == req->req.actual)
+		*is_last = 1;
+	else
+		*is_last = 0;
+
+	/* Fill in the transfer size; set active bit */
+	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
+
+	/* Enable interrupt for the last dtd of a request */
+	if (*is_last && !req->req.no_interrupt)
+		temp |= DTD_IOC;
+
+	dtd->size_ioc_sts = temp;
+
+	mb();
+
+	return dtd;
+}
+
+/* generate dTD linked list for a request */
+static int req_to_dtd(struct mv_req *req)
+{
+	unsigned count;
+	int is_last, is_first = 1;
+	struct mv_dtd *dtd, *last_dtd = NULL;
+	struct mv_udc *udc;
+	dma_addr_t dma;
+
+	udc = req->ep->udc;
+
+	do {
+		dtd = build_dtd(req, &count, &dma, &is_last);
+		if (dtd == NULL)
+			return -ENOMEM;
+
+		if (is_first) {
+			is_first = 0;
+			req->head = dtd;
+		} else {
+			last_dtd->dtd_next = dma;
+			last_dtd->next_dtd_virt = dtd;
+		}
+		last_dtd = dtd;
+		req->dtd_count++;
+	} while (!is_last);
+
+	/* set terminate bit to 1 for the last dTD */
+	dtd->dtd_next = DTD_NEXT_TERMINATE;
+
+	req->tail = dtd;
+
+	return 0;
+}
+
+static int mv_ep_enable(struct usb_ep *_ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct mv_udc *udc;
+	struct mv_ep *ep;
+	struct mv_dqh *dqh;
+	u16 max = 0;
+	u32 bit_pos, epctrlx, direction;
+	unsigned char zlt = 0, ios = 0, mult = 0;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct mv_ep, ep);
+	udc = ep->udc;
+
+	if (!_ep || !desc || ep->desc
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	direction = ep_dir(ep);
+	max = usb_endpoint_maxp(desc);
+
+	/*
+	 * disable HW zero length termination select
+	 * driver handles zero length packet through req->req.zero
+	 */
+	zlt = 1;
+
+	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
+
+	/* Check if the Endpoint is Primed */
+	if ((readl(&udc->op_regs->epprime) & bit_pos)
+		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
+		dev_info(&udc->dev->dev,
+			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
+			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
+			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
+			(unsigned)readl(&udc->op_regs->epprime),
+			(unsigned)readl(&udc->op_regs->epstatus),
+			(unsigned)bit_pos);
+		goto en_done;
+	}
+	/* Set the max packet length, interrupt on Setup and Mult fields */
+	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_BULK:
+		zlt = 1;
+		mult = 0;
+		break;
+	case USB_ENDPOINT_XFER_CONTROL:
+		ios = 1;
+	case USB_ENDPOINT_XFER_INT:
+		mult = 0;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		/* Calculate transactions needed for high bandwidth iso */
+		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
+		max = max & 0x7ff;	/* bit 0~10 */
+		/* 3 transactions at most */
+		if (mult > 3)
+			goto en_done;
+		break;
+	default:
+		goto en_done;
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+	/* Get the endpoint queue head address */
+	dqh = ep->dqh;
+	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+		| (mult << EP_QUEUE_HEAD_MULT_POS)
+		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
+		| (ios ? EP_QUEUE_HEAD_IOS : 0);
+	dqh->next_dtd_ptr = 1;
+	dqh->size_ioc_int_sts = 0;
+
+	ep->ep.maxpacket = max;
+	ep->desc = desc;
+	ep->stopped = 0;
+
+	/* Enable the endpoint for Rx or Tx and set the endpoint type */
+	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+	if (direction == EP_DIR_IN) {
+		epctrlx &= ~EPCTRL_TX_ALL_MASK;
+		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
+			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+				<< EPCTRL_TX_EP_TYPE_SHIFT);
+	} else {
+		epctrlx &= ~EPCTRL_RX_ALL_MASK;
+		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
+			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+				<< EPCTRL_RX_EP_TYPE_SHIFT);
+	}
+	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+
+	/*
+	 * Implement Guideline (GL# USB-7) The unused endpoint type must
+	 * be programmed to bulk.
+	 */
+	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
+		epctrlx |= (USB_ENDPOINT_XFER_BULK
+				<< EPCTRL_RX_EP_TYPE_SHIFT);
+		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+	}
+
+	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
+		epctrlx |= (USB_ENDPOINT_XFER_BULK
+				<< EPCTRL_TX_EP_TYPE_SHIFT);
+		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+en_done:
+	return -EINVAL;
+}
+
+static int  mv_ep_disable(struct usb_ep *_ep)
+{
+	struct mv_udc *udc;
+	struct mv_ep *ep;
+	struct mv_dqh *dqh;
+	u32 bit_pos, epctrlx, direction;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct mv_ep, ep);
+	if ((_ep == NULL) || !ep->desc)
+		return -EINVAL;
+
+	udc = ep->udc;
+
+	/* Get the endpoint queue head address */
+	dqh = ep->dqh;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	direction = ep_dir(ep);
+	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
+
+	/* Reset the max packet length and the interrupt on Setup */
+	dqh->max_packet_length = 0;
+
+	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
+	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+	epctrlx &= ~((direction == EP_DIR_IN)
+			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
+			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
+	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+
+	/* nuke all pending requests (does flush) */
+	nuke(ep, -ESHUTDOWN);
+
+	ep->desc = NULL;
+	ep->ep.desc = NULL;
+	ep->stopped = 1;
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
+static struct usb_request *
+mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	struct mv_req *req = NULL;
+
+	req = kzalloc(sizeof *req, gfp_flags);
+	if (!req)
+		return NULL;
+
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD(&req->queue);
+
+	return &req->req;
+}
+
+static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct mv_req *req = NULL;
+
+	req = container_of(_req, struct mv_req, req);
+
+	if (_req)
+		kfree(req);
+}
+
+static void mv_ep_fifo_flush(struct usb_ep *_ep)
+{
+	struct mv_udc *udc;
+	u32 bit_pos, direction;
+	struct mv_ep *ep;
+	unsigned int loops;
+
+	if (!_ep)
+		return;
+
+	ep = container_of(_ep, struct mv_ep, ep);
+	if (!ep->desc)
+		return;
+
+	udc = ep->udc;
+	direction = ep_dir(ep);
+
+	if (ep->ep_num == 0)
+		bit_pos = (1 << 16) | 1;
+	else if (direction == EP_DIR_OUT)
+		bit_pos = 1 << ep->ep_num;
+	else
+		bit_pos = 1 << (16 + ep->ep_num);
+
+	loops = LOOPS(EPSTATUS_TIMEOUT);
+	do {
+		unsigned int inter_loops;
+
+		if (loops == 0) {
+			dev_err(&udc->dev->dev,
+				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
+				(unsigned)readl(&udc->op_regs->epstatus),
+				(unsigned)bit_pos);
+			return;
+		}
+		/* Write 1 to the Flush register */
+		writel(bit_pos, &udc->op_regs->epflush);
+
+		/* Wait until flushing completed */
+		inter_loops = LOOPS(FLUSH_TIMEOUT);
+		while (readl(&udc->op_regs->epflush)) {
+			/*
+			 * ENDPTFLUSH bit should be cleared to indicate this
+			 * operation is complete
+			 */
+			if (inter_loops == 0) {
+				dev_err(&udc->dev->dev,
+					"TIMEOUT for ENDPTFLUSH=0x%x, bit_pos=0x%x\n",
+					(unsigned)readl(&udc->op_regs->epflush),
+					(unsigned)bit_pos);
+				return;
+			}
+			inter_loops--;
+			udelay(LOOPS_USEC);
+		}
+		loops--;
+	} while (readl(&udc->op_regs->epstatus) & bit_pos);
+}
+
+/* queues (submits) an I/O request to an endpoint */
+static int
+mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+	struct mv_req *req = container_of(_req, struct mv_req, req);
+	struct mv_udc *udc = ep->udc;
+	unsigned long flags;
+
+	/* catch various bogus parameters */
+	if (!_req || !req->req.complete || !req->req.buf
+			|| !list_empty(&req->queue)) {
+		dev_err(&udc->dev->dev, "%s, bad params", __func__);
+		return -EINVAL;
+	}
+	if (unlikely(!_ep || !ep->desc)) {
+		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
+		return -EINVAL;
+	}
+	if ((ep->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+			== USB_ENDPOINT_XFER_ISOC) {
+		if (req->req.length > ep->ep.maxpacket)
+			return -EMSGSIZE;
+	}
+
+	udc = ep->udc;
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	req->ep = ep;
+
+	/* map virtual address to hardware */
+	if (req->req.dma == DMA_ADDR_INVALID) {
+		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+					req->req.buf,
+					req->req.length, ep_dir(ep)
+						? DMA_TO_DEVICE
+						: DMA_FROM_DEVICE);
+		req->mapped = 1;
+	} else {
+		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
+					req->req.dma, req->req.length,
+					ep_dir(ep)
+						? DMA_TO_DEVICE
+						: DMA_FROM_DEVICE);
+		req->mapped = 0;
+	}
+
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->dtd_count = 0;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* build dtds and push them to device queue */
+	if (!req_to_dtd(req)) {
+		int retval;
+		retval = queue_dtd(ep, req);
+		if (retval) {
+			spin_unlock_irqrestore(&udc->lock, flags);
+			return retval;
+		}
+	} else {
+		spin_unlock_irqrestore(&udc->lock, flags);
+		return -ENOMEM;
+	}
+
+	/* Update ep0 state */
+	if (ep->ep_num == 0)
+		udc->ep0_state = DATA_STATE_XMIT;
+
+	/* irq handler advances the queue */
+	list_add_tail(&req->queue, &ep->queue);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
+static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
+{
+	struct mv_dqh *dqh = ep->dqh;
+	u32 bit_pos;
+
+	/* Write dQH next pointer and terminate bit to 0 */
+	dqh->next_dtd_ptr = req->head->td_dma
+		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+
+	/* clear active and halt bit, in case set from a previous error */
+	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
+
+	/* Ensure that updates to the QH will occur before priming. */
+	wmb();
+
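+	/*
+	 * ENDPTPRIME uses bits 0-15 for OUT (RX) endpoints and
+	 * bits 16-31 for IN (TX) endpoints.
+	 */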
+	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
+
+	/* Prime the Endpoint */
+	writel(bit_pos, &ep->udc->op_regs->epprime);
+}
+
+/* dequeues (cancels, unlinks) an I/O request from an endpoint */
+static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+	struct mv_req *req;
+	struct mv_udc *udc = ep->udc;
+	unsigned long flags;
+	int stopped, ret = 0;
+	u32 epctrlx;
+
+	if (!_ep || !_req)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+	stopped = ep->stopped;
+
+	/* Stop the ep before we deal with the queue */
+	ep->stopped = 1;
+	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+	if (ep_dir(ep) == EP_DIR_IN)
+		epctrlx &= ~EPCTRL_TX_ENABLE;
+	else
+		epctrlx &= ~EPCTRL_RX_ENABLE;
+	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* The request is in progress, or completed but not dequeued */
+	if (ep->queue.next == &req->queue) {
+		_req->status = -ECONNRESET;
+		mv_ep_fifo_flush(_ep);	/* flush current transfer */
+
+		/* The request isn't the last request in this ep queue */
+		if (req->queue.next != &ep->queue) {
+			struct mv_req *next_req;
+
+			next_req = list_entry(req->queue.next,
+				struct mv_req, queue);
+
+			/* Point the QH to the first TD of next request */
+			mv_prime_ep(ep, next_req);
+		} else {
+			struct mv_dqh *qh;
+
+			qh = ep->dqh;
+			qh->next_dtd_ptr = 1;
+			qh->size_ioc_int_sts = 0;
+		}
+
+		/* The request hasn't been processed, patch up the TD chain */
+	} else {
+		struct mv_req *prev_req;
+
+		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
+		writel(readl(&req->tail->dtd_next),
+				&prev_req->tail->dtd_next);
+
+	}
+
+	done(ep, req, -ECONNRESET);
+
+	/* Enable EP */
+out:
+	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+	if (ep_dir(ep) == EP_DIR_IN)
+		epctrlx |= EPCTRL_TX_ENABLE;
+	else
+		epctrlx |= EPCTRL_RX_ENABLE;
+	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+	ep->stopped = stopped;
+
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+	return ret;
+}
+
+static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
+{
+	u32 epctrlx;
+
+	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
+
+	if (stall) {
+		if (direction == EP_DIR_IN)
+			epctrlx |= EPCTRL_TX_EP_STALL;
+		else
+			epctrlx |= EPCTRL_RX_EP_STALL;
+	} else {
+		if (direction == EP_DIR_IN) {
+			epctrlx &= ~EPCTRL_TX_EP_STALL;
+			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
+		} else {
+			epctrlx &= ~EPCTRL_RX_EP_STALL;
+			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
+		}
+	}
+	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
+}
+
+static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
+{
+	u32 epctrlx;
+
+	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
+
+	if (direction == EP_DIR_OUT)
+		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
+	else
+		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
+}
+
+static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
+{
+	struct mv_ep *ep;
+	unsigned long flags = 0;
+	int status = 0;
+	struct mv_udc *udc;
+
+	ep = container_of(_ep, struct mv_ep, ep);
+	udc = ep->udc;
+	if (!_ep || !ep->desc) {
+		status = -EINVAL;
+		goto out;
+	}
+
+	if ((ep->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+			== USB_ENDPOINT_XFER_ISOC) {
+		status = -EOPNOTSUPP;
+		goto out;
+	}
+
+	/*
+	 * An attempt to halt an IN endpoint will fail if any transfer
+	 * requests are still queued.
+	 */
+	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
+		status = -EAGAIN;
+		goto out;
+	}
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
+	if (halt && wedge)
+		ep->wedge = 1;
+	else if (!halt)
+		ep->wedge = 0;
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+	if (ep->ep_num == 0) {
+		udc->ep0_state = WAIT_FOR_SETUP;
+		udc->ep0_dir = EP_DIR_OUT;
+	}
+out:
+	return status;
+}
+
+static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
+{
+	return mv_ep_set_halt_wedge(_ep, halt, 0);
+}
+
+static int mv_ep_set_wedge(struct usb_ep *_ep)
+{
+	return mv_ep_set_halt_wedge(_ep, 1, 1);
+}
+
+static struct usb_ep_ops mv_ep_ops = {
+	.enable		= mv_ep_enable,
+	.disable	= mv_ep_disable,
+
+	.alloc_request	= mv_alloc_request,
+	.free_request	= mv_free_request,
+
+	.queue		= mv_ep_queue,
+	.dequeue	= mv_ep_dequeue,
+
+	.set_wedge	= mv_ep_set_wedge,
+	.set_halt	= mv_ep_set_halt,
+	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
+};
+
+static void udc_clock_enable(struct mv_udc *udc)
+{
+	unsigned int i;
+
+	for (i = 0; i < udc->clknum; i++)
+		clk_enable(udc->clk[i]);
+}
+
+static void udc_clock_disable(struct mv_udc *udc)
+{
+	unsigned int i;
+
+	for (i = 0; i < udc->clknum; i++)
+		clk_disable(udc->clk[i]);
+}
+
+static void udc_stop(struct mv_udc *udc)
+{
+	u32 tmp;
+
+	/* Disable interrupts */
+	tmp = readl(&udc->op_regs->usbintr);
+	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
+		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
+	writel(tmp, &udc->op_regs->usbintr);
+
+	udc->stopped = 1;
+
+	/* Clear the Run bit in the command register to stop the controller */
+	tmp = readl(&udc->op_regs->usbcmd);
+	tmp &= ~USBCMD_RUN_STOP;
+	writel(tmp, &udc->op_regs->usbcmd);
+}
+
+static void udc_start(struct mv_udc *udc)
+{
+	u32 usbintr;
+
+	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
+		| USBINTR_PORT_CHANGE_DETECT_EN
+		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
+	/* Enable interrupts */
+	writel(usbintr, &udc->op_regs->usbintr);
+
+	udc->stopped = 0;
+
+	/* Set the Run bit in the command register */
+	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
+}
+
+static int udc_reset(struct mv_udc *udc)
+{
+	unsigned int loops;
+	u32 tmp, portsc;
+
+	/* Stop the controller */
+	tmp = readl(&udc->op_regs->usbcmd);
+	tmp &= ~USBCMD_RUN_STOP;
+	writel(tmp, &udc->op_regs->usbcmd);
+
+	/* Reset the controller to get default values */
+	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
+
+	/* wait for reset to complete */
+	loops = LOOPS(RESET_TIMEOUT);
+	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
+		if (loops == 0) {
+			dev_err(&udc->dev->dev,
+				"Wait for RESET completed TIMEOUT\n");
+			return -ETIMEDOUT;
+		}
+		loops--;
+		udelay(LOOPS_USEC);
+	}
+
+	/* set controller to device mode */
+	tmp = readl(&udc->op_regs->usbmode);
+	tmp |= USBMODE_CTRL_MODE_DEVICE;
+
+	/* turn setup lockout off, require setup tripwire in usbcmd */
+	tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;
+
+	writel(tmp, &udc->op_regs->usbmode);
+
+	writel(0x0, &udc->op_regs->epsetupstat);
+
+	/* Configure the Endpoint List Address */
+	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
+		&udc->op_regs->eplistaddr);
+
+	portsc = readl(&udc->op_regs->portsc[0]);
+	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
+		portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);
+
+	if (udc->force_fs)
+		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
+	else
+		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
+
+	writel(portsc, &udc->op_regs->portsc[0]);
+
+	tmp = readl(&udc->op_regs->epctrlx[0]);
+	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
+	writel(tmp, &udc->op_regs->epctrlx[0]);
+
+	return 0;
+}
+
+static int mv_udc_enable_internal(struct mv_udc *udc)
+{
+	int retval;
+
+	if (udc->active)
+		return 0;
+
+	dev_dbg(&udc->dev->dev, "enable udc\n");
+	udc_clock_enable(udc);
+	if (udc->pdata->phy_init) {
+		retval = udc->pdata->phy_init(udc->phy_regs);
+		if (retval) {
+			dev_err(&udc->dev->dev,
+				"init phy error %d\n", retval);
+			udc_clock_disable(udc);
+			return retval;
+		}
+	}
+	udc->active = 1;
+
+	return 0;
+}
+
+static int mv_udc_enable(struct mv_udc *udc)
+{
+	if (udc->clock_gating)
+		return mv_udc_enable_internal(udc);
+
+	return 0;
+}
+
+static void mv_udc_disable_internal(struct mv_udc *udc)
+{
+	if (udc->active) {
+		dev_dbg(&udc->dev->dev, "disable udc\n");
+		if (udc->pdata->phy_deinit)
+			udc->pdata->phy_deinit(udc->phy_regs);
+		udc_clock_disable(udc);
+		udc->active = 0;
+	}
+}
+
+static void mv_udc_disable(struct mv_udc *udc)
+{
+	if (udc->clock_gating)
+		mv_udc_disable_internal(udc);
+}
+
+static int mv_udc_get_frame(struct usb_gadget *gadget)
+{
+	struct mv_udc *udc;
+	u16	retval;
+
+	if (!gadget)
+		return -ENODEV;
+
+	udc = container_of(gadget, struct mv_udc, gadget);
+
+	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
+
+	return retval;
+}
+
+/* Tries to wake up the host connected to this gadget */
+static int mv_udc_wakeup(struct usb_gadget *gadget)
+{
+	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
+	u32 portsc;
+
+	/* Remote wakeup feature not enabled by host */
+	if (!udc->remote_wakeup)
+		return -ENOTSUPP;
+
+	portsc = readl(&udc->op_regs->portsc);
+	/* not suspended? */
+	if (!(portsc & PORTSCX_PORT_SUSPEND))
+		return 0;
+	/* trigger force resume */
+	portsc |= PORTSCX_PORT_FORCE_RESUME;
+	writel(portsc, &udc->op_regs->portsc[0]);
+	return 0;
+}
+
+static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	struct mv_udc *udc;
+	unsigned long flags;
+	int retval = 0;
+
+	udc = container_of(gadget, struct mv_udc, gadget);
+	spin_lock_irqsave(&udc->lock, flags);
+
+	udc->vbus_active = (is_active != 0);
+
+	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
+		__func__, udc->softconnect, udc->vbus_active);
+
+	if (udc->driver && udc->softconnect && udc->vbus_active) {
+		retval = mv_udc_enable(udc);
+		if (retval == 0) {
+			/* The clock was gated, so re-initialize the registers */
+			udc_reset(udc);
+			ep0_reset(udc);
+			udc_start(udc);
+		}
+	} else if (udc->driver && udc->softconnect) {
+		/* stop all the transfers in the queue */
+		stop_activity(udc, udc->driver);
+		udc_stop(udc);
+		mv_udc_disable(udc);
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return retval;
+}
+
+static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct mv_udc *udc;
+	unsigned long flags;
+	int retval = 0;
+
+	udc = container_of(gadget, struct mv_udc, gadget);
+	spin_lock_irqsave(&udc->lock, flags);
+
+	udc->softconnect = (is_on != 0);
+
+	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
+			__func__, udc->softconnect, udc->vbus_active);
+
+	if (udc->driver && udc->softconnect && udc->vbus_active) {
+		retval = mv_udc_enable(udc);
+		if (retval == 0) {
+			/* The clock was gated, so re-initialize the registers */
+			udc_reset(udc);
+			ep0_reset(udc);
+			udc_start(udc);
+		}
+	} else if (udc->driver && udc->vbus_active) {
+		/* stop all the transfers in the queue */
+		stop_activity(udc, udc->driver);
+		udc_stop(udc);
+		mv_udc_disable(udc);
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return retval;
+}
+
+static int mv_udc_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *));
+static int mv_udc_stop(struct usb_gadget_driver *driver);
+/* device controller usb_gadget_ops structure */
+static const struct usb_gadget_ops mv_ops = {
+
+	/* returns the current frame number */
+	.get_frame	= mv_udc_get_frame,
+
+	/* tries to wake up the host connected to this gadget */
+	.wakeup		= mv_udc_wakeup,
+
+	/* notify controller that VBUS is powered or not */
+	.vbus_session	= mv_udc_vbus_session,
+
+	/* D+ pullup, software-controlled connect/disconnect to USB host */
+	.pullup		= mv_udc_pullup,
+	.start		= mv_udc_start,
+	.stop		= mv_udc_stop,
+};
+
+static int eps_init(struct mv_udc *udc)
+{
+	struct mv_ep	*ep;
+	char name[14];
+	int i;
+
+	/* initialize ep0 */
+	ep = &udc->eps[0];
+	ep->udc = udc;
+	strncpy(ep->name, "ep0", sizeof(ep->name));
+	ep->ep.name = ep->name;
+	ep->ep.ops = &mv_ep_ops;
+	ep->wedge = 0;
+	ep->stopped = 0;
+	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
+	ep->ep_num = 0;
+	ep->desc = &mv_ep0_desc;
+	INIT_LIST_HEAD(&ep->queue);
+
+	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
+
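+	/*
+	 * eps[] is indexed as 2 * ep_num + direction (even = OUT, odd = IN).
+	 * ep0 is bidirectional and uses only eps[0], so index 1 is skipped.
+	 */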
+	/* initialize other endpoints */
+	for (i = 2; i < udc->max_eps * 2; i++) {
+		ep = &udc->eps[i];
+		if (i % 2) {
+			snprintf(name, sizeof(name), "ep%din", i / 2);
+			ep->direction = EP_DIR_IN;
+		} else {
+			snprintf(name, sizeof(name), "ep%dout", i / 2);
+			ep->direction = EP_DIR_OUT;
+		}
+		ep->udc = udc;
+		strncpy(ep->name, name, sizeof(ep->name));
+		ep->ep.name = ep->name;
+
+		ep->ep.ops = &mv_ep_ops;
+		ep->stopped = 0;
+		ep->ep.maxpacket = (unsigned short) ~0;
+		ep->ep_num = i / 2;
+
+		INIT_LIST_HEAD(&ep->queue);
+		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+
+		ep->dqh = &udc->ep_dqh[i];
+	}
+
+	return 0;
+}
+
+/* delete all endpoint requests, called with spinlock held */
+static void nuke(struct mv_ep *ep, int status)
+{
+	/* called with spinlock held */
+	ep->stopped = 1;
+
+	/* endpoint fifo flush */
+	mv_ep_fifo_flush(&ep->ep);
+
+	while (!list_empty(&ep->queue)) {
+		struct mv_req *req = NULL;
+		req = list_entry(ep->queue.next, struct mv_req, queue);
+		done(ep, req, status);
+	}
+}
+
+/* stop all USB activities */
+static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
+{
+	struct mv_ep	*ep;
+
+	nuke(&udc->eps[0], -ESHUTDOWN);
+
+	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
+		nuke(ep, -ESHUTDOWN);
+	}
+
+	/* report disconnect; the driver is already quiesced */
+	if (driver) {
+		spin_unlock(&udc->lock);
+		driver->disconnect(&udc->gadget);
+		spin_lock(&udc->lock);
+	}
+}
+
+static int mv_udc_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct mv_udc *udc = the_controller;
+	int retval = 0;
+	unsigned long flags;
+
+	if (!udc)
+		return -ENODEV;
+
+	if (udc->driver)
+		return -EBUSY;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* hook up the driver ... */
+	driver->driver.bus = NULL;
+	udc->driver = driver;
+	udc->gadget.dev.driver = &driver->driver;
+
+	udc->usb_state = USB_STATE_ATTACHED;
+	udc->ep0_state = WAIT_FOR_SETUP;
+	udc->ep0_dir = EP_DIR_OUT;
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	retval = bind(&udc->gadget);
+	if (retval) {
+		dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
+				driver->driver.name, retval);
+		udc->driver = NULL;
+		udc->gadget.dev.driver = NULL;
+		return retval;
+	}
+
+	if (udc->transceiver) {
+		retval = otg_set_peripheral(udc->transceiver->otg,
+					&udc->gadget);
+		if (retval) {
+			dev_err(&udc->dev->dev,
+				"unable to register peripheral to otg\n");
+			if (driver->unbind) {
+				driver->unbind(&udc->gadget);
+				udc->gadget.dev.driver = NULL;
+				udc->driver = NULL;
+			}
+			return retval;
+		}
+	}
+
+	/* pullup is always on */
+	mv_udc_pullup(&udc->gadget, 1);
+
+	/* When booting with the cable attached, no vbus irq will occur */
+	if (udc->qwork)
+		queue_work(udc->qwork, &udc->vbus_work);
+
+	return 0;
+}
+
+static int mv_udc_stop(struct usb_gadget_driver *driver)
+{
+	struct mv_udc *udc = the_controller;
+	unsigned long flags;
+
+	if (!udc)
+		return -ENODEV;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	mv_udc_enable(udc);
+	udc_stop(udc);
+
+	/* stop all usb activities */
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	stop_activity(udc, driver);
+	mv_udc_disable(udc);
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	/* unbind gadget driver */
+	driver->unbind(&udc->gadget);
+	udc->gadget.dev.driver = NULL;
+	udc->driver = NULL;
+
+	return 0;
+}
+
+static void mv_set_ptc(struct mv_udc *udc, u32 mode)
+{
+	u32 portsc;
+
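+	/* the port test control field lives in bits 16-19 of PORTSC */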
+	portsc = readl(&udc->op_regs->portsc[0]);
+	portsc |= mode << 16;
+	writel(portsc, &udc->op_regs->portsc[0]);
+}
+
+static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
+{
+	struct mv_udc *udc = the_controller;
+	struct mv_req *req = container_of(_req, struct mv_req, req);
+	unsigned long flags;
+
+	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
+
+	spin_lock_irqsave(&udc->lock, flags);
+	if (req->test_mode) {
+		mv_set_ptc(udc, req->test_mode);
+		req->test_mode = 0;
+	}
+	spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+static int
+udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
+{
+	int retval = 0;
+	struct mv_req *req;
+	struct mv_ep *ep;
+
+	ep = &udc->eps[0];
+	udc->ep0_dir = direction;
+	udc->ep0_state = WAIT_FOR_OUT_STATUS;
+
+	req = udc->status_req;
+
+	/* fill in the request structure */
+	if (empty == false) {
+		*((u16 *) req->req.buf) = cpu_to_le16(status);
+		req->req.length = 2;
+	} else
+		req->req.length = 0;
+
+	req->ep = ep;
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	if (udc->test_mode) {
+		req->req.complete = prime_status_complete;
+		req->test_mode = udc->test_mode;
+		udc->test_mode = 0;
+	} else
+		req->req.complete = NULL;
+	req->dtd_count = 0;
+
+	if (req->req.dma == DMA_ADDR_INVALID) {
+		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+				req->req.buf, req->req.length,
+				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		req->mapped = 1;
+	}
+
+	/* prime the data phase */
+	if (!req_to_dtd(req))
+		retval = queue_dtd(ep, req);
+	else {	/* no mem */
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	if (retval) {
+		dev_err(&udc->dev->dev, "failed to queue status request\n");
+		goto out;
+	}
+
+	list_add_tail(&req->queue, &ep->queue);
+
+	return 0;
+out:
+	return retval;
+}
+
+static void mv_udc_testmode(struct mv_udc *udc, u16 index)
+{
+	if (index <= TEST_FORCE_EN) {
+		udc->test_mode = index;
+		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+			ep0_stall(udc);
+	} else
+		dev_err(&udc->dev->dev,
+			"This test mode (%d) is not supported\n", index);
+}
+
+static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
+{
+	udc->dev_addr = (u8)setup->wValue;
+
+	/* update usb state */
+	udc->usb_state = USB_STATE_ADDRESS;
+
+	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+		ep0_stall(udc);
+}
+
+static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
+	struct usb_ctrlrequest *setup)
+{
+	u16 status = 0;
+	int retval;
+
+	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+		!= (USB_DIR_IN | USB_TYPE_STANDARD))
+		return;
+
+	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+		status = 1 << USB_DEVICE_SELF_POWERED;
+		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+	} else if ((setup->bRequestType & USB_RECIP_MASK)
+			== USB_RECIP_INTERFACE) {
+		/* get interface status */
+		status = 0;
+	} else if ((setup->bRequestType & USB_RECIP_MASK)
+			== USB_RECIP_ENDPOINT) {
+		u8 ep_num, direction;
+
+		ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
+		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
+				? EP_DIR_IN : EP_DIR_OUT;
+		status = ep_is_stall(udc, ep_num, direction)
+				<< USB_ENDPOINT_HALT;
+	}
+
+	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
+	if (retval)
+		ep0_stall(udc);
+	else
+		udc->ep0_state = DATA_STATE_XMIT;
+}
+
+static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
+{
+	u8 ep_num;
+	u8 direction;
+	struct mv_ep *ep;
+
+	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
+		switch (setup->wValue) {
+		case USB_DEVICE_REMOTE_WAKEUP:
+			udc->remote_wakeup = 0;
+			break;
+		default:
+			goto out;
+		}
+	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
+		switch (setup->wValue) {
+		case USB_ENDPOINT_HALT:
+			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
+			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
+				? EP_DIR_IN : EP_DIR_OUT;
+			if (setup->wValue != 0 || setup->wLength != 0
+				|| ep_num > udc->max_eps)
+				goto out;
+			ep = &udc->eps[ep_num * 2 + direction];
+			if (ep->wedge == 1)
+				break;
+			spin_unlock(&udc->lock);
+			ep_set_stall(udc, ep_num, direction, 0);
+			spin_lock(&udc->lock);
+			break;
+		default:
+			goto out;
+		}
+	} else
+		goto out;
+
+	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+		ep0_stall(udc);
+out:
+	return;
+}
+
+static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
+{
+	u8 ep_num;
+	u8 direction;
+
+	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
+		switch (setup->wValue) {
+		case USB_DEVICE_REMOTE_WAKEUP:
+			udc->remote_wakeup = 1;
+			break;
+		case USB_DEVICE_TEST_MODE:
+			if (setup->wIndex & 0xFF
+				|| udc->gadget.speed != USB_SPEED_HIGH)
+				ep0_stall(udc);
+
+			if (udc->usb_state != USB_STATE_CONFIGURED
+				&& udc->usb_state != USB_STATE_ADDRESS
+				&& udc->usb_state != USB_STATE_DEFAULT)
+				ep0_stall(udc);
+
+			mv_udc_testmode(udc, (setup->wIndex >> 8));
+			goto out;
+		default:
+			goto out;
+		}
+	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
+		switch (setup->wValue) {
+		case USB_ENDPOINT_HALT:
+			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
+			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
+				? EP_DIR_IN : EP_DIR_OUT;
+			if (setup->wValue != 0 || setup->wLength != 0
+				|| ep_num > udc->max_eps)
+				goto out;
+			spin_unlock(&udc->lock);
+			ep_set_stall(udc, ep_num, direction, 1);
+			spin_lock(&udc->lock);
+			break;
+		default:
+			goto out;
+		}
+	} else
+		goto out;
+
+	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+		ep0_stall(udc);
+out:
+	return;
+}
+
+static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
+	struct usb_ctrlrequest *setup)
+{
+	bool delegate = false;
+
+	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);
+
+	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+			setup->bRequestType, setup->bRequest,
+			setup->wValue, setup->wIndex, setup->wLength);
+	/* We process some standard setup requests here */
+	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+		switch (setup->bRequest) {
+		case USB_REQ_GET_STATUS:
+			ch9getstatus(udc, ep_num, setup);
+			break;
+
+		case USB_REQ_SET_ADDRESS:
+			ch9setaddress(udc, setup);
+			break;
+
+		case USB_REQ_CLEAR_FEATURE:
+			ch9clearfeature(udc, setup);
+			break;
+
+		case USB_REQ_SET_FEATURE:
+			ch9setfeature(udc, setup);
+			break;
+
+		default:
+			delegate = true;
+		}
+	} else
+		delegate = true;
+
+	/* delegate the remaining requests to the gadget driver */
+	if (delegate == true) {
+		/* USB requests handled by gadget */
+		if (setup->wLength) {
+			/* DATA phase from gadget, STATUS phase from udc */
+			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+					?  EP_DIR_IN : EP_DIR_OUT;
+			spin_unlock(&udc->lock);
+			if (udc->driver->setup(&udc->gadget,
+				&udc->local_setup_buff) < 0)
+				ep0_stall(udc);
+			spin_lock(&udc->lock);
+			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
+					?  DATA_STATE_XMIT : DATA_STATE_RECV;
+		} else {
+			/* no DATA phase, IN STATUS phase from gadget */
+			udc->ep0_dir = EP_DIR_IN;
+			spin_unlock(&udc->lock);
+			if (udc->driver->setup(&udc->gadget,
+				&udc->local_setup_buff) < 0)
+				ep0_stall(udc);
+			spin_lock(&udc->lock);
+			udc->ep0_state = WAIT_FOR_OUT_STATUS;
+		}
+	}
+}
+
+/* complete the DATA or STATUS phase of ep0, and prime the status phase if needed */
+static void ep0_req_complete(struct mv_udc *udc,
+	struct mv_ep *ep0, struct mv_req *req)
+{
+	u32 new_addr;
+
+	if (udc->usb_state == USB_STATE_ADDRESS) {
+		/* set the new address */
+		new_addr = (u32)udc->dev_addr;
+		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
+			&udc->op_regs->deviceaddr);
+	}
+
+	done(ep0, req, 0);
+
+	switch (udc->ep0_state) {
+	case DATA_STATE_XMIT:
+		/* receive status phase */
+		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
+			ep0_stall(udc);
+		break;
+	case DATA_STATE_RECV:
+		/* send status phase */
+		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+			ep0_stall(udc);
+		break;
+	case WAIT_FOR_OUT_STATUS:
+		udc->ep0_state = WAIT_FOR_SETUP;
+		break;
+	case WAIT_FOR_SETUP:
+		dev_err(&udc->dev->dev, "unexpected ep0 packets\n");
+		break;
+	default:
+		ep0_stall(udc);
+		break;
+	}
+}
+
+static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
+{
+	u32 temp;
+	struct mv_dqh *dqh;
+
+	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];
+
+	/* Clear bit in ENDPTSETUPSTAT */
+	writel((1 << ep_num), &udc->op_regs->epsetupstat);
+
+	/* a hazard exists if another setup packet arrives during the copy */
+	do {
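+		/*
+		 * If hardware clears the tripwire during the copy, a new
+		 * setup packet overwrote the buffer and the copy is retried.
+		 */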
+		/* Set Setup Tripwire */
+		temp = readl(&udc->op_regs->usbcmd);
+		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
+
+		/* Copy the setup packet to local buffer */
+		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
+	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));
+
+	/* Clear Setup Tripwire */
+	temp = readl(&udc->op_regs->usbcmd);
+	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
+}
+
+static void irq_process_tr_complete(struct mv_udc *udc)
+{
+	u32 tmp, bit_pos;
+	int i, ep_num = 0, direction = 0;
+	struct mv_ep	*curr_ep;
+	struct mv_req *curr_req, *temp_req;
+	int status;
+
+	/*
+	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
+	 * because the setup packets are to be read ASAP
+	 */
+
+	/* Process all Setup packet received interrupts */
+	tmp = readl(&udc->op_regs->epsetupstat);
+
+	if (tmp) {
+		for (i = 0; i < udc->max_eps; i++) {
+			if (tmp & (1 << i)) {
+				get_setup_data(udc, i,
+					(u8 *)(&udc->local_setup_buff));
+				handle_setup_packet(udc, i,
+					&udc->local_setup_buff);
+			}
+		}
+	}
+
+	/* Don't clear the endpoint setup status register here.
+	 * It is cleared as a setup packet is read out of the buffer
+	 */
+
+	/* Process non-setup transaction complete interrupts */
+	tmp = readl(&udc->op_regs->epcomplete);
+
+	if (!tmp)
+		return;
+
+	writel(tmp, &udc->op_regs->epcomplete);
+
+	for (i = 0; i < udc->max_eps * 2; i++) {
+		ep_num = i >> 1;
+		direction = i % 2;
+
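+		/* ENDPTCOMPLETE: bits 0-15 flag OUT endpoints, bits 16-31 flag IN endpoints */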
+		bit_pos = 1 << (ep_num + 16 * direction);
+
+		if (!(bit_pos & tmp))
+			continue;
+
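+		/* ep0 IN (index 1) maps back to the single bidirectional eps[0] */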
+		if (i == 1)
+			curr_ep = &udc->eps[0];
+		else
+			curr_ep = &udc->eps[i];
+		/* process the request queue until an incomplete request is found */
+		list_for_each_entry_safe(curr_req, temp_req,
+			&curr_ep->queue, queue) {
+			status = process_ep_req(udc, i, curr_req);
+			if (status)
+				break;
+
+			/* write back status to req */
+			curr_req->req.status = status;
+
+			/* ep0 request completion */
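+		/*
+		 * Unlink the removed request's dTDs: point the previous
+		 * request's last dTD at whatever followed them.
+		 */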
+			if (ep_num == 0) {
+				ep0_req_complete(udc, curr_ep, curr_req);
+				break;
+			} else {
+				done(curr_ep, curr_req, status);
+			}
+		}
+	}
+}
+
+void irq_process_reset(struct mv_udc *udc)
+{
+	u32 tmp;
+	unsigned int loops;
+
+	udc->ep0_dir = EP_DIR_OUT;
+	udc->ep0_state = WAIT_FOR_SETUP;
+	udc->remote_wakeup = 0;		/* default to 0 on reset */
+
+	/* The device address occupies bits 25-31; clear it */
+	tmp = readl(&udc->op_regs->deviceaddr);
+	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
+	writel(tmp, &udc->op_regs->deviceaddr);
+
+	/* Clear all the setup token semaphores */
+	tmp = readl(&udc->op_regs->epsetupstat);
+	writel(tmp, &udc->op_regs->epsetupstat);
+
+	/* Clear all the endpoint complete status bits */
+	tmp = readl(&udc->op_regs->epcomplete);
+	writel(tmp, &udc->op_regs->epcomplete);
+
+	/* wait until all endptprime bits cleared */
+	loops = LOOPS(PRIME_TIMEOUT);
+	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
+		if (loops == 0) {
+			dev_err(&udc->dev->dev,
+				"Timeout for ENDPTPRIME = 0x%x\n",
+				readl(&udc->op_regs->epprime));
+			break;
+		}
+		loops--;
+		udelay(LOOPS_USEC);
+	}
+
+	/* Write 1s to the Flush register */
+	writel((u32)~0, &udc->op_regs->epflush);
+
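+	/*
+	 * If the port is still in reset, a normal bus reset is in progress;
+	 * otherwise the reset already finished and the controller must be
+	 * fully re-initialized.
+	 */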
+	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
+		dev_info(&udc->dev->dev, "usb bus reset\n");
+		udc->usb_state = USB_STATE_DEFAULT;
+		/* reset all the queues, stop all USB activities */
+		stop_activity(udc, udc->driver);
+	} else {
+		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
+			readl(&udc->op_regs->portsc));
+
+		/*
+		 * re-initialize
+		 * controller reset
+		 */
+		udc_reset(udc);
+
+		/* reset all the queues, stop all USB activities */
+		stop_activity(udc, udc->driver);
+
+		/* reset ep0 dQH and endptctrl */
+		ep0_reset(udc);
+
+		/* enable interrupt and set controller to run state */
+		udc_start(udc);
+
+		udc->usb_state = USB_STATE_ATTACHED;
+	}
+}
+
+static void handle_bus_resume(struct mv_udc *udc)
+{
+	udc->usb_state = udc->resume_state;
+	udc->resume_state = 0;
+
+	/* report resume to the driver */
+	if (udc->driver) {
+		if (udc->driver->resume) {
+			spin_unlock(&udc->lock);
+			udc->driver->resume(&udc->gadget);
+			spin_lock(&udc->lock);
+		}
+	}
+}
+
+static void irq_process_suspend(struct mv_udc *udc)
+{
+	udc->resume_state = udc->usb_state;
+	udc->usb_state = USB_STATE_SUSPENDED;
+
+	if (udc->driver->suspend) {
+		spin_unlock(&udc->lock);
+		udc->driver->suspend(&udc->gadget);
+		spin_lock(&udc->lock);
+	}
+}
+
+static void irq_process_port_change(struct mv_udc *udc)
+{
+	u32 portsc;
+
+	portsc = readl(&udc->op_regs->portsc[0]);
+	if (!(portsc & PORTSCX_PORT_RESET)) {
+		/* Get the speed */
+		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
+		switch (speed) {
+		case PORTSCX_PORT_SPEED_HIGH:
+			udc->gadget.speed = USB_SPEED_HIGH;
+			break;
+		case PORTSCX_PORT_SPEED_FULL:
+			udc->gadget.speed = USB_SPEED_FULL;
+			break;
+		case PORTSCX_PORT_SPEED_LOW:
+			udc->gadget.speed = USB_SPEED_LOW;
+			break;
+		default:
+			udc->gadget.speed = USB_SPEED_UNKNOWN;
+			break;
+		}
+	}
+
+	if (portsc & PORTSCX_PORT_SUSPEND) {
+		udc->resume_state = udc->usb_state;
+		udc->usb_state = USB_STATE_SUSPENDED;
+		if (udc->driver->suspend) {
+			spin_unlock(&udc->lock);
+			udc->driver->suspend(&udc->gadget);
+			spin_lock(&udc->lock);
+		}
+	}
+
+	if (!(portsc & PORTSCX_PORT_SUSPEND)
+		&& udc->usb_state == USB_STATE_SUSPENDED) {
+		handle_bus_resume(udc);
+	}
+
+	if (!udc->resume_state)
+		udc->usb_state = USB_STATE_DEFAULT;
+}
+
+static void irq_process_error(struct mv_udc *udc)
+{
+	/* Increment the error count */
+	udc->errors++;
+}
+
+static irqreturn_t mv_udc_irq(int irq, void *dev)
+{
+	struct mv_udc *udc = (struct mv_udc *)dev;
+	u32 status, intr;
+
+	/* Disable ISR when stopped bit is set */
+	if (udc->stopped)
+		return IRQ_NONE;
+
+	spin_lock(&udc->lock);
+
+	status = readl(&udc->op_regs->usbsts);
+	intr = readl(&udc->op_regs->usbintr);
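+	/* handle only the interrupt sources that are currently enabled */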
+	status &= intr;
+
+	if (status == 0) {
+		spin_unlock(&udc->lock);
+		return IRQ_NONE;
+	}
+
+	/* Clear all the interrupts occurred */
+	writel(status, &udc->op_regs->usbsts);
+
+	if (status & USBSTS_ERR)
+		irq_process_error(udc);
+
+	if (status & USBSTS_RESET)
+		irq_process_reset(udc);
+
+	if (status & USBSTS_PORT_CHANGE)
+		irq_process_port_change(udc);
+
+	if (status & USBSTS_INT)
+		irq_process_tr_complete(udc);
+
+	if (status & USBSTS_SUSPEND)
+		irq_process_suspend(udc);
+
+	spin_unlock(&udc->lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
+{
+	struct mv_udc *udc = (struct mv_udc *)dev;
+
+	/* polling VBUS and initializing the PHY may take too long; defer it */
+	if (udc->qwork)
+		queue_work(udc->qwork, &udc->vbus_work);
+
+	return IRQ_HANDLED;
+}
+
+static void mv_udc_vbus_work(struct work_struct *work)
+{
+	struct mv_udc *udc;
+	unsigned int vbus;
+
+	udc = container_of(work, struct mv_udc, vbus_work);
+	if (!udc->pdata->vbus)
+		return;
+
+	vbus = udc->pdata->vbus->poll();
+	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
+
+	if (vbus == VBUS_HIGH)
+		mv_udc_vbus_session(&udc->gadget, 1);
+	else if (vbus == VBUS_LOW)
+		mv_udc_vbus_session(&udc->gadget, 0);
+}
+
+/* release device structure */
+static void gadget_release(struct device *_dev)
+{
+	struct mv_udc *udc = the_controller;
+
+	complete(udc->done);
+}
+
+static int __devexit mv_udc_remove(struct platform_device *dev)
+{
+	struct mv_udc *udc = the_controller;
+	int clk_i;
+
+	usb_del_gadget_udc(&udc->gadget);
+
+	if (udc->qwork) {
+		flush_workqueue(udc->qwork);
+		destroy_workqueue(udc->qwork);
+	}
+
+	/*
+	 * If the transceiver has been initialized, the vbus irq was not
+	 * requested by the udc driver.
+	 */
+	if (udc->pdata && udc->pdata->vbus
+		&& udc->clock_gating && udc->transceiver == NULL)
+		free_irq(udc->pdata->vbus->irq, udc);
+
+	/* free memory allocated in probe */
+	if (udc->dtd_pool)
+		dma_pool_destroy(udc->dtd_pool);
+
+	if (udc->ep_dqh)
+		dma_free_coherent(&dev->dev, udc->ep_dqh_size,
+			udc->ep_dqh, udc->ep_dqh_dma);
+
+	kfree(udc->eps);
+
+	if (udc->irq)
+		free_irq(udc->irq, udc);
+
+	mv_udc_disable(udc);
+
+	if (udc->cap_regs)
+		iounmap(udc->cap_regs);
+
+	if (udc->phy_regs)
+		iounmap(udc->phy_regs);
+
+	if (udc->status_req) {
+		kfree(udc->status_req->req.buf);
+		kfree(udc->status_req);
+	}
+
+	for (clk_i = 0; clk_i < udc->clknum; clk_i++)
+		clk_put(udc->clk[clk_i]);
+
+	device_unregister(&udc->gadget.dev);
+
+	/* free dev, wait for the release() finished */
+	wait_for_completion(udc->done);
+	kfree(udc);
+
+	the_controller = NULL;
+
+	return 0;
+}
+
+static int __devinit mv_udc_probe(struct platform_device *dev)
+{
+	struct mv_usb_platform_data *pdata = dev->dev.platform_data;
+	struct mv_udc *udc;
+	int retval = 0;
+	int clk_i = 0;
+	struct resource *r;
+	size_t size;
+
+	if (pdata == NULL) {
+		dev_err(&dev->dev, "missing platform_data\n");
+		return -ENODEV;
+	}
+
+	size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
+	udc = kzalloc(size, GFP_KERNEL);
+	if (udc == NULL) {
+		dev_err(&dev->dev, "failed to allocate memory for udc\n");
+		return -ENOMEM;
+	}
+
+	the_controller = udc;
+	udc->done = &release_done;
+	udc->pdata = dev->dev.platform_data;
+	spin_lock_init(&udc->lock);
+
+	udc->dev = dev;
+
+#ifdef CONFIG_USB_OTG_UTILS
+	if (pdata->mode == MV_USB_MODE_OTG)
+		udc->transceiver = usb_get_transceiver();
+#endif
+
+	udc->clknum = pdata->clknum;
+	for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
+		udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
+		if (IS_ERR(udc->clk[clk_i])) {
+			retval = PTR_ERR(udc->clk[clk_i]);
+			goto err_put_clk;
+		}
+	}
+
+	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
+	if (r == NULL) {
+		dev_err(&dev->dev, "no I/O memory resource defined\n");
+		retval = -ENODEV;
+		goto err_put_clk;
+	}
+
+	udc->cap_regs = (struct mv_cap_regs __iomem *)
+		ioremap(r->start, resource_size(r));
+	if (udc->cap_regs == NULL) {
+		dev_err(&dev->dev, "failed to map I/O memory\n");
+		retval = -EBUSY;
+		goto err_put_clk;
+	}
+
+	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
+	if (r == NULL) {
+		dev_err(&dev->dev, "no phy I/O memory resource defined\n");
+		retval = -ENODEV;
+		goto err_iounmap_capreg;
+	}
+
+	udc->phy_regs = ioremap(r->start, resource_size(r));
+	if (udc->phy_regs == NULL) {
+		dev_err(&dev->dev, "failed to map phy I/O memory\n");
+		retval = -EBUSY;
+		goto err_iounmap_capreg;
+	}
+
+	/* we will access the controller registers, so enable the clock */
+	retval = mv_udc_enable_internal(udc);
+	if (retval)
+		goto err_iounmap_phyreg;
+
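+	/* the operational registers start CAPLENGTH bytes past the capability register base */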
+	udc->op_regs =
+		(struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
+		+ (readl(&udc->cap_regs->caplength_hciversion)
+			& CAPLENGTH_MASK));
+	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
+
+	/*
+	 * Some platforms use USB to download the image and may not
+	 * disconnect the USB gadget before loading the kernel, so stop
+	 * the udc here first.
+	 */
+	udc_stop(udc);
+	writel(0xFFFFFFFF, &udc->op_regs->usbsts);
+
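+	/* two dQHs per endpoint (OUT and IN); round the total up to DQH_ALIGNMENT */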
+	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
+	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
+	udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
+					&udc->ep_dqh_dma, GFP_KERNEL);
+
+	if (udc->ep_dqh == NULL) {
+		dev_err(&dev->dev, "allocate dQH memory failed\n");
+		retval = -ENOMEM;
+		goto err_disable_clock;
+	}
+	udc->ep_dqh_size = size;
+
+	/* create dTD dma_pool resource */
+	udc->dtd_pool = dma_pool_create("mv_dtd",
+			&dev->dev,
+			sizeof(struct mv_dtd),
+			DTD_ALIGNMENT,
+			DMA_BOUNDARY);
+
+	if (!udc->dtd_pool) {
+		retval = -ENOMEM;
+		goto err_free_dma;
+	}
+
+	size = udc->max_eps * sizeof(struct mv_ep) * 2;
+	udc->eps = kzalloc(size, GFP_KERNEL);
+	if (udc->eps == NULL) {
+		dev_err(&dev->dev, "allocate ep memory failed\n");
+		retval = -ENOMEM;
+		goto err_destroy_dma;
+	}
+
+	/* initialize ep0 status request structure */
+	udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
+	if (!udc->status_req) {
+		dev_err(&dev->dev, "allocate status_req memory failed\n");
+		retval = -ENOMEM;
+		goto err_free_eps;
+	}
+	INIT_LIST_HEAD(&udc->status_req->queue);
+
+	/* allocate a small buffer so the status request has a valid address */
+	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
+	udc->status_req->req.dma = DMA_ADDR_INVALID;
+
+	udc->resume_state = USB_STATE_NOTATTACHED;
+	udc->usb_state = USB_STATE_POWERED;
+	udc->ep0_dir = EP_DIR_OUT;
+	udc->remote_wakeup = 0;
+
+	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
+	if (r == NULL) {
+		dev_err(&dev->dev, "no IRQ resource defined\n");
+		retval = -ENODEV;
+		goto err_free_status_req;
+	}
+	udc->irq = r->start;
+	if (request_irq(udc->irq, mv_udc_irq,
+		IRQF_SHARED, driver_name, udc)) {
+		dev_err(&dev->dev, "Request irq %d for UDC failed\n",
+			udc->irq);
+		retval = -ENODEV;
+		goto err_free_status_req;
+	}
+
+	/* initialize gadget structure */
+	udc->gadget.ops = &mv_ops;	/* usb_gadget_ops */
+	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
+	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
+	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
+	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */
+
+	/* the "gadget" abstracts/virtualizes the controller */
+	dev_set_name(&udc->gadget.dev, "gadget");
+	udc->gadget.dev.parent = &dev->dev;
+	udc->gadget.dev.dma_mask = dev->dev.dma_mask;
+	udc->gadget.dev.release = gadget_release;
+	udc->gadget.name = driver_name;		/* gadget name */
+
+	retval = device_register(&udc->gadget.dev);
+	if (retval)
+		goto err_free_irq;
+
+	eps_init(udc);
+
+	/* VBUS detection: we can disable/enable the clock on demand. */
+	if (udc->transceiver)
+		udc->clock_gating = 1;
+	else if (pdata->vbus) {
+		udc->clock_gating = 1;
+		retval = request_threaded_irq(pdata->vbus->irq, NULL,
+				mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
+		if (retval) {
+			dev_info(&dev->dev,
+				"cannot request irq for VBUS, disabling clock gating\n");
+			udc->clock_gating = 0;
+		}
+
+		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
+		if (!udc->qwork) {
+			dev_err(&dev->dev, "cannot create workqueue\n");
+			retval = -ENOMEM;
+			goto err_unregister;
+		}
+
+		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
+	}
+
+	/*
+	 * When clock gating is supported, the clock and PHY can be disabled
+	 * here.  If not, VBUS detection is not supported either, so vbus
+	 * must be treated as active all the time for the controller to work.
+	 */
+	if (udc->clock_gating)
+		mv_udc_disable_internal(udc);
+	else
+		udc->vbus_active = 1;
+
+	retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
+	if (retval)
+		goto err_unregister;
+
+	dev_info(&dev->dev, "UDC device probed successfully, %s clock gating\n",
+		udc->clock_gating ? "with" : "without");
+
+	return 0;
+
+err_unregister:
+	if (udc->pdata && udc->pdata->vbus
+		&& udc->clock_gating && udc->transceiver == NULL)
+		free_irq(pdata->vbus->irq, udc);
+	device_unregister(&udc->gadget.dev);
+err_free_irq:
+	free_irq(udc->irq, udc);
+err_free_status_req:
+	kfree(udc->status_req->req.buf);
+	kfree(udc->status_req);
+err_free_eps:
+	kfree(udc->eps);
+err_destroy_dma:
+	dma_pool_destroy(udc->dtd_pool);
+err_free_dma:
+	dma_free_coherent(&dev->dev, udc->ep_dqh_size,
+			udc->ep_dqh, udc->ep_dqh_dma);
+err_disable_clock:
+	mv_udc_disable_internal(udc);
+err_iounmap_phyreg:
+	iounmap(udc->phy_regs);
+err_iounmap_capreg:
+	iounmap(udc->cap_regs);
+err_put_clk:
+	for (clk_i--; clk_i >= 0; clk_i--)
+		clk_put(udc->clk[clk_i]);
+	the_controller = NULL;
+	kfree(udc);
+	return retval;
+}
+
+#ifdef CONFIG_PM
+static int mv_udc_suspend(struct device *_dev)
+{
+	struct mv_udc *udc = the_controller;
+
+	/* if OTG is enabled, the following will be done in the OTG driver */
+	if (udc->transceiver)
+		return 0;
+
+	if (udc->pdata->vbus && udc->pdata->vbus->poll)
+		if (udc->pdata->vbus->poll() == VBUS_HIGH) {
+			dev_info(&udc->dev->dev, "USB cable is connected!\n");
+			return -EAGAIN;
+		}
+
+	/*
+	 * The udc can only suspend once the cable is unplugged, so the
+	 * clock_gating == 1 case needs no special handling here.
+	 */
+	if (!udc->clock_gating) {
+		udc_stop(udc);
+
+		spin_lock_irq(&udc->lock);
+		/* stop all usb activities */
+		stop_activity(udc, udc->driver);
+		spin_unlock_irq(&udc->lock);
+
+		mv_udc_disable_internal(udc);
+	}
+
+	return 0;
+}
+
+static int mv_udc_resume(struct device *_dev)
+{
+	struct mv_udc *udc = the_controller;
+	int retval;
+
+	/* if OTG is enabled, the following will be done in the OTG driver */
+	if (udc->transceiver)
+		return 0;
+
+	if (!udc->clock_gating) {
+		retval = mv_udc_enable_internal(udc);
+		if (retval)
+			return retval;
+
+		if (udc->driver && udc->softconnect) {
+			udc_reset(udc);
+			ep0_reset(udc);
+			udc_start(udc);
+		}
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops mv_udc_pm_ops = {
+	.suspend	= mv_udc_suspend,
+	.resume		= mv_udc_resume,
+};
+#endif
+
+static void mv_udc_shutdown(struct platform_device *dev)
+{
+	struct mv_udc *udc = the_controller;
+	u32 mode;
+
+	/* reset controller mode to IDLE */
+	mode = readl(&udc->op_regs->usbmode);
+	mode &= ~3;
+	writel(mode, &udc->op_regs->usbmode);
+}
+
+static struct platform_driver udc_driver = {
+	.probe		= mv_udc_probe,
+	.remove		= __exit_p(mv_udc_remove),
+	.shutdown	= mv_udc_shutdown,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "mv-udc",
+#ifdef CONFIG_PM
+		.pm	= &mv_udc_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(udc_driver);
+MODULE_ALIAS("platform:mv-udc");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ncm.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ncm.c
new file mode 100644
index 0000000..8953003
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ncm.c
@@ -0,0 +1,240 @@
+/*
+ * ncm.c -- NCM gadget driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Contact: Yauheni Kaliuta <yauheni.kaliuta@nokia.com>
+ *
+ * The driver borrows from ether.c which is:
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+
+
+#include "u_ether.h"
+
+#define DRIVER_DESC		"NCM Gadget"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "f_ncm.c"
+#include "u_ether.c"
+
+/*-------------------------------------------------------------------------*/
+
+/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+
+/* Thanks to NetChip Technologies for donating this product ID.
+ * It's for devices with only CDC Ethernet configurations.
+ */
+#define CDC_VENDOR_NUM		0x0525	/* NetChip */
+#define CDC_PRODUCT_NUM		0xa4a1	/* Linux-USB Ethernet Gadget */
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		cpu_to_le16 (0x0200),
+
+	.bDeviceClass =		USB_CLASS_COMM,
+	.bDeviceSubClass =	0,
+	.bDeviceProtocol =	0,
+	/* .bMaxPacketSize0 = f(hardware) */
+
+	/* Vendor and product id defaults change according to what configs
+	 * we support.  (As does bNumConfigurations.)  These values can
+	 * also be overridden by module parameters.
+	 */
+	.idVendor =		cpu_to_le16 (CDC_VENDOR_NUM),
+	.idProduct =		cpu_to_le16 (CDC_PRODUCT_NUM),
+	/* .bcdDevice = f(hardware) */
+	/* .iManufacturer = DYNAMIC */
+	/* .iProduct = DYNAMIC */
+	/* NO SERIAL NUMBER */
+	.bNumConfigurations =	1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/* REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer,
+	[STRING_PRODUCT_IDX].s = DRIVER_DESC,
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+static u8 hostaddr[ETH_ALEN];
+
+/*-------------------------------------------------------------------------*/
+
+static int __init ncm_do_config(struct usb_configuration *c)
+{
+	/* FIXME alloc iConfiguration string, set it in c->strings */
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	return ncm_bind_config(c, hostaddr);
+}
+
+static struct usb_configuration ncm_config_driver = {
+	/* .label = f(hardware) */
+	.label			= "CDC Ethernet (NCM)",
+	.bConfigurationValue	= 1,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init gncm_bind(struct usb_composite_dev *cdev)
+{
+	int			gcnum;
+	struct usb_gadget	*gadget = cdev->gadget;
+	int			status;
+
+	/* set up network link layer */
+	status = gether_setup(cdev->gadget, hostaddr);
+	if (status < 0)
+		return status;
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+	else {
+		/* We assume that can_support_ecm() tells the truth;
+		 * but if the controller isn't recognized at all then
+		 * that assumption is a bit more likely to be wrong.
+		 */
+		dev_warn(&gadget->dev,
+			 "controller '%s' not recognized; trying %s\n",
+			 gadget->name,
+			 ncm_config_driver.label);
+		device_desc.bcdDevice =
+			cpu_to_le16(0x0300 | 0x0099);
+	}
+
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+
+	/* device descriptor strings: manufacturer, product */
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+		init_utsname()->sysname, init_utsname()->release,
+		gadget->name);
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	device_desc.iProduct = status;
+
+	status = usb_add_config(cdev, &ncm_config_driver,
+				ncm_do_config);
+	if (status < 0)
+		goto fail;
+
+	dev_info(&gadget->dev, "%s\n", DRIVER_DESC);
+
+	return 0;
+
+fail:
+	gether_cleanup();
+	return status;
+}
+
+static int __exit gncm_unbind(struct usb_composite_dev *cdev)
+{
+	gether_cleanup();
+	return 0;
+}
+
+static struct usb_composite_driver ncm_driver = {
+	.name		= "g_ncm",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.max_speed	= USB_SPEED_HIGH,
+	.unbind		= __exit_p(gncm_unbind),
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Yauheni Kaliuta");
+MODULE_LICENSE("GPL");
+
+static int __init init(void)
+{
+	return usb_composite_probe(&ncm_driver, gncm_bind);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&ncm_driver);
+}
+module_exit(cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ndis.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ndis.h
new file mode 100644
index 0000000..b0e52fc
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/ndis.h
@@ -0,0 +1,211 @@
+/*
+ * ndis.h
+ *
+ * ntddndis.h modified by Benedikt Spranger <b.spranger@pengutronix.de>
+ *
+ * Thanks to the cygwin development team,
+ * especially to Casper S. Hornstrup <chorns@users.sourceforge.net>
+ *
+ * THIS SOFTWARE IS NOT COPYRIGHTED
+ *
+ * This source code is offered for use in the public domain. You may
+ * use, modify or distribute it freely.
+ */
+
+#ifndef _LINUX_NDIS_H
+#define _LINUX_NDIS_H
+
+
+#define NDIS_STATUS_MULTICAST_FULL	  0xC0010009
+#define NDIS_STATUS_MULTICAST_EXISTS      0xC001000A
+#define NDIS_STATUS_MULTICAST_NOT_FOUND   0xC001000B
+
+enum NDIS_DEVICE_POWER_STATE {
+	NdisDeviceStateUnspecified = 0,
+	NdisDeviceStateD0,
+	NdisDeviceStateD1,
+	NdisDeviceStateD2,
+	NdisDeviceStateD3,
+	NdisDeviceStateMaximum
+};
+
+struct NDIS_PM_WAKE_UP_CAPABILITIES {
+	enum NDIS_DEVICE_POWER_STATE  MinMagicPacketWakeUp;
+	enum NDIS_DEVICE_POWER_STATE  MinPatternWakeUp;
+	enum NDIS_DEVICE_POWER_STATE  MinLinkChangeWakeUp;
+};
+
+/* NDIS_PNP_CAPABILITIES.Flags constants */
+#define NDIS_DEVICE_WAKE_UP_ENABLE                0x00000001
+#define NDIS_DEVICE_WAKE_ON_PATTERN_MATCH_ENABLE  0x00000002
+#define NDIS_DEVICE_WAKE_ON_MAGIC_PACKET_ENABLE   0x00000004
+
+struct NDIS_PNP_CAPABILITIES {
+	__le32					Flags;
+	struct NDIS_PM_WAKE_UP_CAPABILITIES	WakeUpCapabilities;
+};
+
+struct NDIS_PM_PACKET_PATTERN {
+	__le32	Priority;
+	__le32	Reserved;
+	__le32	MaskSize;
+	__le32	PatternOffset;
+	__le32	PatternSize;
+	__le32	PatternFlags;
+};
+
+
+/* Required Object IDs (OIDs) */
+#define OID_GEN_SUPPORTED_LIST            0x00010101
+#define OID_GEN_HARDWARE_STATUS           0x00010102
+#define OID_GEN_MEDIA_SUPPORTED           0x00010103
+#define OID_GEN_MEDIA_IN_USE              0x00010104
+#define OID_GEN_MAXIMUM_LOOKAHEAD         0x00010105
+#define OID_GEN_MAXIMUM_FRAME_SIZE        0x00010106
+#define OID_GEN_LINK_SPEED                0x00010107
+#define OID_GEN_TRANSMIT_BUFFER_SPACE     0x00010108
+#define OID_GEN_RECEIVE_BUFFER_SPACE      0x00010109
+#define OID_GEN_TRANSMIT_BLOCK_SIZE       0x0001010A
+#define OID_GEN_RECEIVE_BLOCK_SIZE        0x0001010B
+#define OID_GEN_VENDOR_ID                 0x0001010C
+#define OID_GEN_VENDOR_DESCRIPTION        0x0001010D
+#define OID_GEN_CURRENT_PACKET_FILTER     0x0001010E
+#define OID_GEN_CURRENT_LOOKAHEAD         0x0001010F
+#define OID_GEN_DRIVER_VERSION            0x00010110
+#define OID_GEN_MAXIMUM_TOTAL_SIZE        0x00010111
+#define OID_GEN_PROTOCOL_OPTIONS          0x00010112
+#define OID_GEN_MAC_OPTIONS               0x00010113
+#define OID_GEN_MEDIA_CONNECT_STATUS      0x00010114
+#define OID_GEN_MAXIMUM_SEND_PACKETS      0x00010115
+#define OID_GEN_VENDOR_DRIVER_VERSION     0x00010116
+#define OID_GEN_SUPPORTED_GUIDS           0x00010117
+#define OID_GEN_NETWORK_LAYER_ADDRESSES   0x00010118
+#define OID_GEN_TRANSPORT_HEADER_OFFSET   0x00010119
+#define OID_GEN_MACHINE_NAME              0x0001021A
+#define OID_GEN_RNDIS_CONFIG_PARAMETER    0x0001021B
+#define OID_GEN_VLAN_ID                   0x0001021C
+
+/* Optional OIDs */
+#define OID_GEN_MEDIA_CAPABILITIES        0x00010201
+#define OID_GEN_PHYSICAL_MEDIUM           0x00010202
+
+/* Required statistics OIDs */
+#define OID_GEN_XMIT_OK                   0x00020101
+#define OID_GEN_RCV_OK                    0x00020102
+#define OID_GEN_XMIT_ERROR                0x00020103
+#define OID_GEN_RCV_ERROR                 0x00020104
+#define OID_GEN_RCV_NO_BUFFER             0x00020105
+
+/* Optional statistics OIDs */
+#define OID_GEN_DIRECTED_BYTES_XMIT       0x00020201
+#define OID_GEN_DIRECTED_FRAMES_XMIT      0x00020202
+#define OID_GEN_MULTICAST_BYTES_XMIT      0x00020203
+#define OID_GEN_MULTICAST_FRAMES_XMIT     0x00020204
+#define OID_GEN_BROADCAST_BYTES_XMIT      0x00020205
+#define OID_GEN_BROADCAST_FRAMES_XMIT     0x00020206
+#define OID_GEN_DIRECTED_BYTES_RCV        0x00020207
+#define OID_GEN_DIRECTED_FRAMES_RCV       0x00020208
+#define OID_GEN_MULTICAST_BYTES_RCV       0x00020209
+#define OID_GEN_MULTICAST_FRAMES_RCV      0x0002020A
+#define OID_GEN_BROADCAST_BYTES_RCV       0x0002020B
+#define OID_GEN_BROADCAST_FRAMES_RCV      0x0002020C
+#define OID_GEN_RCV_CRC_ERROR             0x0002020D
+#define OID_GEN_TRANSMIT_QUEUE_LENGTH     0x0002020E
+#define OID_GEN_GET_TIME_CAPS             0x0002020F
+#define OID_GEN_GET_NETCARD_TIME          0x00020210
+#define OID_GEN_NETCARD_LOAD              0x00020211
+#define OID_GEN_DEVICE_PROFILE            0x00020212
+#define OID_GEN_INIT_TIME_MS              0x00020213
+#define OID_GEN_RESET_COUNTS              0x00020214
+#define OID_GEN_MEDIA_SENSE_COUNTS        0x00020215
+#define OID_GEN_FRIENDLY_NAME             0x00020216
+#define OID_GEN_MINIPORT_INFO             0x00020217
+#define OID_GEN_RESET_VERIFY_PARAMETERS   0x00020218
+
+/* IEEE 802.3 (Ethernet) OIDs */
+#define NDIS_802_3_MAC_OPTION_PRIORITY    0x00000001
+
+#define OID_802_3_PERMANENT_ADDRESS       0x01010101
+#define OID_802_3_CURRENT_ADDRESS         0x01010102
+#define OID_802_3_MULTICAST_LIST          0x01010103
+#define OID_802_3_MAXIMUM_LIST_SIZE       0x01010104
+#define OID_802_3_MAC_OPTIONS             0x01010105
+#define OID_802_3_RCV_ERROR_ALIGNMENT     0x01020101
+#define OID_802_3_XMIT_ONE_COLLISION      0x01020102
+#define OID_802_3_XMIT_MORE_COLLISIONS    0x01020103
+#define OID_802_3_XMIT_DEFERRED           0x01020201
+#define OID_802_3_XMIT_MAX_COLLISIONS     0x01020202
+#define OID_802_3_RCV_OVERRUN             0x01020203
+#define OID_802_3_XMIT_UNDERRUN           0x01020204
+#define OID_802_3_XMIT_HEARTBEAT_FAILURE  0x01020205
+#define OID_802_3_XMIT_TIMES_CRS_LOST     0x01020206
+#define OID_802_3_XMIT_LATE_COLLISIONS    0x01020207
+
+/* OID_GEN_MINIPORT_INFO constants */
+#define NDIS_MINIPORT_BUS_MASTER                      0x00000001
+#define NDIS_MINIPORT_WDM_DRIVER                      0x00000002
+#define NDIS_MINIPORT_SG_LIST                         0x00000004
+#define NDIS_MINIPORT_SUPPORTS_MEDIA_QUERY            0x00000008
+#define NDIS_MINIPORT_INDICATES_PACKETS               0x00000010
+#define NDIS_MINIPORT_IGNORE_PACKET_QUEUE             0x00000020
+#define NDIS_MINIPORT_IGNORE_REQUEST_QUEUE            0x00000040
+#define NDIS_MINIPORT_IGNORE_TOKEN_RING_ERRORS        0x00000080
+#define NDIS_MINIPORT_INTERMEDIATE_DRIVER             0x00000100
+#define NDIS_MINIPORT_IS_NDIS_5                       0x00000200
+#define NDIS_MINIPORT_IS_CO                           0x00000400
+#define NDIS_MINIPORT_DESERIALIZE                     0x00000800
+#define NDIS_MINIPORT_REQUIRES_MEDIA_POLLING          0x00001000
+#define NDIS_MINIPORT_SUPPORTS_MEDIA_SENSE            0x00002000
+#define NDIS_MINIPORT_NETBOOT_CARD                    0x00004000
+#define NDIS_MINIPORT_PM_SUPPORTED                    0x00008000
+#define NDIS_MINIPORT_SUPPORTS_MAC_ADDRESS_OVERWRITE  0x00010000
+#define NDIS_MINIPORT_USES_SAFE_BUFFER_APIS           0x00020000
+#define NDIS_MINIPORT_HIDDEN                          0x00040000
+#define NDIS_MINIPORT_SWENUM                          0x00080000
+#define NDIS_MINIPORT_SURPRISE_REMOVE_OK              0x00100000
+#define NDIS_MINIPORT_NO_HALT_ON_SUSPEND              0x00200000
+#define NDIS_MINIPORT_HARDWARE_DEVICE                 0x00400000
+#define NDIS_MINIPORT_SUPPORTS_CANCEL_SEND_PACKETS    0x00800000
+#define NDIS_MINIPORT_64BITS_DMA                      0x01000000
+
+#define NDIS_MEDIUM_802_3		0x00000000
+#define NDIS_MEDIUM_802_5		0x00000001
+#define NDIS_MEDIUM_FDDI		0x00000002
+#define NDIS_MEDIUM_WAN			0x00000003
+#define NDIS_MEDIUM_LOCAL_TALK		0x00000004
+#define NDIS_MEDIUM_DIX			0x00000005
+#define NDIS_MEDIUM_ARCENT_RAW		0x00000006
+#define NDIS_MEDIUM_ARCENT_878_2	0x00000007
+#define NDIS_MEDIUM_ATM			0x00000008
+#define NDIS_MEDIUM_WIRELESS_LAN	0x00000009
+#define NDIS_MEDIUM_IRDA		0x0000000A
+#define NDIS_MEDIUM_BPC			0x0000000B
+#define NDIS_MEDIUM_CO_WAN		0x0000000C
+#define NDIS_MEDIUM_1394		0x0000000D
+
+#define NDIS_PACKET_TYPE_DIRECTED	0x00000001
+#define NDIS_PACKET_TYPE_MULTICAST	0x00000002
+#define NDIS_PACKET_TYPE_ALL_MULTICAST	0x00000004
+#define NDIS_PACKET_TYPE_BROADCAST	0x00000008
+#define NDIS_PACKET_TYPE_SOURCE_ROUTING	0x00000010
+#define NDIS_PACKET_TYPE_PROMISCUOUS	0x00000020
+#define NDIS_PACKET_TYPE_SMT		0x00000040
+#define NDIS_PACKET_TYPE_ALL_LOCAL	0x00000080
+#define NDIS_PACKET_TYPE_GROUP		0x00000100
+#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL	0x00000200
+#define NDIS_PACKET_TYPE_FUNCTIONAL	0x00000400
+#define NDIS_PACKET_TYPE_MAC_FRAME	0x00000800
+
+#define NDIS_MEDIA_STATE_CONNECTED	0x00000000
+#define NDIS_MEDIA_STATE_DISCONNECTED	0x00000001
+
+#define NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA     0x00000001
+#define NDIS_MAC_OPTION_RECEIVE_SERIALIZED      0x00000002
+#define NDIS_MAC_OPTION_TRANSFERS_NOT_PEND      0x00000004
+#define NDIS_MAC_OPTION_NO_LOOPBACK             0x00000008
+#define NDIS_MAC_OPTION_FULL_DUPLEX             0x00000010
+#define NDIS_MAC_OPTION_EOTX_INDICATION         0x00000020
+#define NDIS_MAC_OPTION_8021P_PRIORITY          0x00000040
+#define NDIS_MAC_OPTION_RESERVED                0x80000000
+
+#endif /* _LINUX_NDIS_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/net2272.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/net2272.c
new file mode 100644
index 0000000..43ac748
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/net2272.c
@@ -0,0 +1,2715 @@
+/*
+ * Driver for PLX NET2272 USB device controller
+ *
+ * Copyright (C) 2005-2006 PLX Technology, Inc.
+ * Copyright (C) 2006-2011 Analog Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#include "net2272.h"
+
+#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
+
+static const char driver_name[] = "net2272";
+static const char driver_vers[] = "2006 October 17/mainline";
+static const char driver_desc[] = DRIVER_DESC;
+
+static const char ep0name[] = "ep0";
+static const char * const ep_name[] = {
+	ep0name,
+	"ep-a", "ep-b", "ep-c",
+};
+
+#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
+#ifdef CONFIG_USB_GADGET_NET2272_DMA
+/*
+ * use_dma: the NET2272 can use an external DMA controller.
+ * Note that since there is no generic DMA api, some functions,
+ * notably request_dma, start_dma, and cancel_dma will need to be
+ * modified for your platform's particular dma controller.
+ *
+ * If use_dma is disabled, pio will be used instead.
+ */
+static bool use_dma = 0;
+module_param(use_dma, bool, 0644);
+
+/*
+ * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
+ * The NET2272 can only use dma for a single endpoint at a time.
+ * At some point this could be modified to allow either endpoint
+ * to take control of dma as it becomes available.
+ *
+ * Note that DMA should not be used on OUT endpoints unless it can
+ * be guaranteed that no short packets will arrive on an IN endpoint
+ * while the DMA operation is pending.  Otherwise the OUT DMA will
+ * terminate prematurely (See NET2272 Errata 630-0213-0101)
+ */
+static ushort dma_ep = 1;
+module_param(dma_ep, ushort, 0644);
+
+/*
+ * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
+ *	mode 0 == Slow DREQ mode
+ *	mode 1 == Fast DREQ mode
+ *	mode 2 == Burst mode
+ */
+static ushort dma_mode = 2;
+module_param(dma_mode, ushort, 0644);
+#else
+#define use_dma 0
+#define dma_ep 1
+#define dma_mode 2
+#endif
+
+/*
+ * fifo_mode: net2272 buffer configuration:
+ *      mode 0 == ep-{a,b,c} 512db each
+ *      mode 1 == ep-a 1k, ep-{b,c} 512db
+ *      mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
+ *      mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
+ */
+static ushort fifo_mode = 0;
+module_param(fifo_mode, ushort, 0644);
+
+/*
+ * enable_suspend: When enabled, the driver will respond to
+ * USB suspend requests by powering down the NET2272.  Otherwise,
+ * USB suspend requests will be ignored.  This is acceptable for
+ * self-powered devices.  For bus powered devices set this to 1.
+ */
+static ushort enable_suspend = 0;
+module_param(enable_suspend, ushort, 0644);
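+
+/*
+ * All of the module parameters above are declared with 0644 permissions,
+ * so (assuming a modular build producing net2272.ko) they may be set at
+ * load time, e.g. "modprobe net2272 fifo_mode=1 dma_ep=1", or inspected
+ * and changed later through /sys/module/net2272/parameters/.
+ */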
+
+static void assert_out_naking(struct net2272_ep *ep, const char *where)
+{
+	u8 tmp;
+
+#ifndef DEBUG
+	return;
+#endif
+
+	tmp = net2272_ep_read(ep, EP_STAT0);
+	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
+		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
+			ep->ep.name, where, tmp);
+		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
+	}
+}
+#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
+
+static void stop_out_naking(struct net2272_ep *ep)
+{
+	u8 tmp = net2272_ep_read(ep, EP_STAT0);
+
+	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
+		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
+}
+
+#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
+
+static char *type_string(u8 bmAttributes)
+{
+	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_BULK: return "bulk";
+	case USB_ENDPOINT_XFER_ISOC: return "iso";
+	case USB_ENDPOINT_XFER_INT:  return "intr";
+	default:                     return "control";
+	}
+}
+
+static char *buf_state_string(unsigned state)
+{
+	switch (state) {
+	case BUFF_FREE:  return "free";
+	case BUFF_VALID: return "valid";
+	case BUFF_LCL:   return "local";
+	case BUFF_USB:   return "usb";
+	default:         return "unknown";
+	}
+}
+
+static char *dma_mode_string(void)
+{
+	if (!use_dma)
+		return "PIO";
+	switch (dma_mode) {
+	case 0:  return "SLOW DREQ";
+	case 1:  return "FAST DREQ";
+	case 2:  return "BURST";
+	default: return "invalid";
+	}
+}
+
+static void net2272_dequeue_all(struct net2272_ep *);
+static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
+static int net2272_fifo_status(struct usb_ep *);
+
+static struct usb_ep_ops net2272_ep_ops;
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+	struct net2272 *dev;
+	struct net2272_ep *ep;
+	u32 max;
+	u8 tmp;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct net2272_ep, ep);
+	if (!_ep || !desc || ep->desc || _ep->name == ep0name
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+	dev = ep->dev;
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	max = usb_endpoint_maxp(desc) & 0x1fff;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	_ep->maxpacket = max & 0x7fff;
+	ep->desc = desc;
+
+	/* net2272_ep_reset() has already been called */
+	ep->stopped = 0;
+	ep->wedged = 0;
+
+	/* set speed-dependent max packet */
+	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
+	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
+
+	/* set type, direction, address; reset fifo counters */
+	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
+	tmp = usb_endpoint_type(desc);
+	if (usb_endpoint_xfer_bulk(desc)) {
+		/* catch some particularly blatant driver bugs */
+		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
+		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
+			spin_unlock_irqrestore(&dev->lock, flags);
+			return -ERANGE;
+		}
+	}
+	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
+	tmp <<= ENDPOINT_TYPE;
+	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
+	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
+	tmp |= (1 << ENDPOINT_ENABLE);
+
+	/* for OUT transfers, block the rx fifo until a read is posted */
+	ep->is_in = usb_endpoint_dir_in(desc);
+	if (!ep->is_in)
+		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
+
+	net2272_ep_write(ep, EP_CFG, tmp);
+
+	/* enable irqs */
+	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
+	net2272_write(dev, IRQENB0, tmp);
+
+	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
+		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
+		| net2272_ep_read(ep, EP_IRQENB);
+	net2272_ep_write(ep, EP_IRQENB, tmp);
+
+	tmp = desc->bEndpointAddress;
+	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
+		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
+		type_string(desc->bmAttributes), max,
+		net2272_ep_read(ep, EP_CFG));
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return 0;
+}
+
+static void net2272_ep_reset(struct net2272_ep *ep)
+{
+	u8 tmp;
+
+	ep->desc = NULL;
+	INIT_LIST_HEAD(&ep->queue);
+
+	ep->ep.maxpacket = ~0;
+	ep->ep.ops = &net2272_ep_ops;
+
+	/* disable irqs, endpoint */
+	net2272_ep_write(ep, EP_IRQENB, 0);
+
+	/* init to our chosen defaults, notably so that we NAK OUT
+	 * packets until the driver queues a read.
+	 */
+	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
+	net2272_ep_write(ep, EP_RSPSET, tmp);
+
+	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
+	if (ep->num != 0)
+		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
+
+	net2272_ep_write(ep, EP_RSPCLR, tmp);
+
+	/* scrub most status bits, and flush any fifo state */
+	net2272_ep_write(ep, EP_STAT0,
+			  (1 << DATA_IN_TOKEN_INTERRUPT)
+			| (1 << DATA_OUT_TOKEN_INTERRUPT)
+			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
+			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
+
+	net2272_ep_write(ep, EP_STAT1,
+			    (1 << TIMEOUT)
+			  | (1 << USB_OUT_ACK_SENT)
+			  | (1 << USB_OUT_NAK_SENT)
+			  | (1 << USB_IN_ACK_RCVD)
+			  | (1 << USB_IN_NAK_SENT)
+			  | (1 << USB_STALL_SENT)
+			  | (1 << LOCAL_OUT_ZLP)
+			  | (1 << BUFFER_FLUSH));
+
+	/* fifo size is handled separately */
+}
+
+static int net2272_disable(struct usb_ep *_ep)
+{
+	struct net2272_ep *ep;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct net2272_ep, ep);
+	if (!_ep || !ep->desc || _ep->name == ep0name)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->dev->lock, flags);
+	net2272_dequeue_all(ep);
+	net2272_ep_reset(ep);
+
+	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
+
+	spin_unlock_irqrestore(&ep->dev->lock, flags);
+	return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static struct usb_request *
+net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	struct net2272_ep *ep;
+	struct net2272_request *req;
+
+	if (!_ep)
+		return NULL;
+	ep = container_of(_ep, struct net2272_ep, ep);
+
+	req = kzalloc(sizeof(*req), gfp_flags);
+	if (!req)
+		return NULL;
+
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD(&req->queue);
+
+	return &req->req;
+}
+
+static void
+net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct net2272_ep *ep;
+	struct net2272_request *req;
+
+	ep = container_of(_ep, struct net2272_ep, ep);
+	if (!_ep || !_req)
+		return;
+
+	req = container_of(_req, struct net2272_request, req);
+	WARN_ON(!list_empty(&req->queue));
+	kfree(req);
+}
+
+static void
+net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
+{
+	struct net2272 *dev;
+	unsigned stopped = ep->stopped;
+
+	if (ep->num == 0) {
+		if (ep->dev->protocol_stall) {
+			ep->stopped = 1;
+			set_halt(ep);
+		}
+		allow_status(ep);
+	}
+
+	list_del_init(&req->queue);
+
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	dev = ep->dev;
+	if (use_dma && ep->dma)
+		usb_gadget_unmap_request(&dev->gadget, &req->req,
+				ep->is_in);
+
+	if (status && status != -ESHUTDOWN)
+		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length, req->req.buf);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+	spin_unlock(&dev->lock);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&dev->lock);
+	ep->stopped = stopped;
+}
+
+static int
+net2272_write_packet(struct net2272_ep *ep, u8 *buf,
+	struct net2272_request *req, unsigned max)
+{
+	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
+	u16 *bufp;
+	unsigned length, count;
+	u8 tmp;
+
+	length = min(req->req.length - req->req.actual, max);
+	req->req.actual += length;
+
+	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
+		ep->ep.name, req, max, length,
+		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
+
+	count = length;
+	bufp = (u16 *)buf;
+
+	while (likely(count >= 2)) {
+		/* no byte-swap required; chip endian set during init */
+		writew(*bufp++, ep_data);
+		count -= 2;
+	}
+	buf = (u8 *)bufp;
+
+	/* write final byte by placing the NET2272 into 8-bit mode */
+	if (unlikely(count)) {
+		tmp = net2272_read(ep->dev, LOCCTL);
+		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
+		writeb(*buf, ep_data);
+		net2272_write(ep->dev, LOCCTL, tmp);
+	}
+	return length;
+}
+
+/* returns: 0: still running, 1: completed, negative: errno */
+static int
+net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
+{
+	u8 *buf;
+	unsigned count, max;
+	int status;
+
+	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
+		ep->ep.name, req->req.actual, req->req.length);
+
+	/*
+	 * Keep loading the endpoint until the final packet is loaded,
+	 * or the endpoint buffer is full.
+	 */
+ top:
+	/*
+	 * Clear interrupt status
+	 *  - Packet Transmitted interrupt will become set again when the
+	 *    host successfully takes another packet
+	 */
+	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
+	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
+		buf = req->req.buf + req->req.actual;
+		prefetch(buf);
+
+		/* force pagesel */
+		net2272_ep_read(ep, EP_STAT0);
+
+		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
+			(net2272_ep_read(ep, EP_AVAIL0));
+
+		if (max < ep->ep.maxpacket)
+			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
+				| (net2272_ep_read(ep, EP_AVAIL0));
+
+		count = net2272_write_packet(ep, buf, req, max);
+		/* see if we are done */
+		if (req->req.length == req->req.actual) {
+			/* validate short or zlp packet */
+			if (count < ep->ep.maxpacket)
+				set_fifo_bytecount(ep, 0);
+			net2272_done(ep, req, 0);
+
+			if (!list_empty(&ep->queue)) {
+				req = list_entry(ep->queue.next,
+						struct net2272_request,
+						queue);
+				status = net2272_kick_dma(ep, req);
+
+				if (status < 0)
+					if ((net2272_ep_read(ep, EP_STAT0)
+							& (1 << BUFFER_EMPTY)))
+						goto top;
+			}
+			return 1;
+		}
+		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
+	}
+	return 0;
+}
+
+static void
+net2272_out_flush(struct net2272_ep *ep)
+{
+	ASSERT_OUT_NAKING(ep);
+
+	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
+			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
+	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
+}
+
+static int
+net2272_read_packet(struct net2272_ep *ep, u8 *buf,
+	struct net2272_request *req, unsigned avail)
+{
+	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
+	unsigned is_short;
+	u16 *bufp;
+
+	req->req.actual += avail;
+
+	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
+		ep->ep.name, req, avail,
+		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
+
+	is_short = (avail < ep->ep.maxpacket);
+
+	if (unlikely(avail == 0)) {
+		/* remove any zlp from the buffer */
+		(void)readw(ep_data);
+		return is_short;
+	}
+
+	/* Ensure we get the final byte */
+	if (unlikely(avail % 2))
+		avail++;
+	bufp = (u16 *)buf;
+
+	do {
+		*bufp++ = readw(ep_data);
+		avail -= 2;
+	} while (avail);
+
+	/*
+	 * To avoid a false "endpoint available" race condition, EP_STAT0
+	 * must be read twice in the case of a short transfer.
+	 */
+	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
+		net2272_ep_read(ep, EP_STAT0);
+
+	return is_short;
+}
+
+static int
+net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
+{
+	u8 *buf;
+	unsigned is_short;
+	int count;
+	int tmp;
+	int cleanup = 0;
+	int status = -1;
+
+	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
+		ep->ep.name, req->req.actual, req->req.length);
+
+ top:
+	do {
+		buf = req->req.buf + req->req.actual;
+		prefetchw(buf);
+
+		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
+			| net2272_ep_read(ep, EP_AVAIL0);
+
+		net2272_ep_write(ep, EP_STAT0,
+			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
+			(1 << DATA_PACKET_RECEIVED_INTERRUPT));
+
+		tmp = req->req.length - req->req.actual;
+
+		if (count > tmp) {
+			if ((tmp % ep->ep.maxpacket) != 0) {
+				dev_err(ep->dev->dev,
+					"%s out fifo %d bytes, expected %d\n",
+					ep->ep.name, count, tmp);
+				cleanup = 1;
+			}
+			count = (tmp > 0) ? tmp : 0;
+		}
+
+		is_short = net2272_read_packet(ep, buf, req, count);
+
+		/* completion */
+		if (unlikely(cleanup || is_short ||
+				((req->req.actual == req->req.length)
+				 && !req->req.zero))) {
+
+			if (cleanup) {
+				net2272_out_flush(ep);
+				net2272_done(ep, req, -EOVERFLOW);
+			} else
+				net2272_done(ep, req, 0);
+
+			/* re-initialize endpoint transfer registers
+			 * otherwise they may result in erroneous pre-validation
+			 * for subsequent control reads
+			 */
+			if (unlikely(ep->num == 0)) {
+				net2272_ep_write(ep, EP_TRANSFER2, 0);
+				net2272_ep_write(ep, EP_TRANSFER1, 0);
+				net2272_ep_write(ep, EP_TRANSFER0, 0);
+			}
+
+			if (!list_empty(&ep->queue)) {
+				req = list_entry(ep->queue.next,
+					struct net2272_request, queue);
+				status = net2272_kick_dma(ep, req);
+				if ((status < 0) &&
+				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
+					goto top;
+			}
+			return 1;
+		}
+	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
+
+	return 0;
+}
+
+static void
+net2272_pio_advance(struct net2272_ep *ep)
+{
+	struct net2272_request *req;
+
+	if (unlikely(list_empty(&ep->queue)))
+		return;
+
+	req = list_entry(ep->queue.next, struct net2272_request, queue);
+	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
+}
+
+/* returns 0 on success, else negative errno */
+static int
+net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
+	unsigned len, unsigned dir)
+{
+	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
+		ep, buf, len, dir);
+
+	/* The NET2272 only supports a single dma channel */
+	if (dev->dma_busy)
+		return -EBUSY;
+	/*
+	 * EP_TRANSFER (used to determine the number of bytes received
+	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
+	 */
+	if ((dir == 1) && (len > 0x1000000))
+		return -EINVAL;
+
+	dev->dma_busy = 1;
+
+	/* initialize platform's dma */
+#ifdef CONFIG_PCI
+	/* NET2272 addr, buffer addr, length, etc. */
+	switch (dev->dev_id) {
+	case PCI_DEVICE_ID_RDK1:
+		/* Setup PLX 9054 DMA mode */
+		writel((1 << LOCAL_BUS_WIDTH) |
+			(1 << TA_READY_INPUT_ENABLE) |
+			(0 << LOCAL_BURST_ENABLE) |
+			(1 << DONE_INTERRUPT_ENABLE) |
+			(1 << LOCAL_ADDRESSING_MODE) |
+			(1 << DEMAND_MODE) |
+			(1 << DMA_EOT_ENABLE) |
+			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
+			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
+			dev->rdk1.plx9054_base_addr + DMAMODE0);
+
+		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
+		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
+		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
+		writel((dir << DIRECTION_OF_TRANSFER) |
+			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
+			dev->rdk1.plx9054_base_addr + DMADPR0);
+		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
+			readl(dev->rdk1.plx9054_base_addr + INTCSR),
+			dev->rdk1.plx9054_base_addr + INTCSR);
+
+		break;
+	}
+#endif
+
+	net2272_write(dev, DMAREQ,
+		(0 << DMA_BUFFER_VALID) |
+		(1 << DMA_REQUEST_ENABLE) |
+		(1 << DMA_CONTROL_DACK) |
+		(dev->dma_eot_polarity << EOT_POLARITY) |
+		(dev->dma_dack_polarity << DACK_POLARITY) |
+		(dev->dma_dreq_polarity << DREQ_POLARITY) |
+		((ep >> 1) << DMA_ENDPOINT_SELECT));
+
+	(void) net2272_read(dev, SCRATCH);
+
+	return 0;
+}
+
+static void
+net2272_start_dma(struct net2272 *dev)
+{
+	/* start platform's dma controller */
+#ifdef CONFIG_PCI
+	switch (dev->dev_id) {
+	case PCI_DEVICE_ID_RDK1:
+		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
+			dev->rdk1.plx9054_base_addr + DMACSR0);
+		break;
+	}
+#endif
+}
+
+/* returns 0 on success, else negative errno */
+static int
+net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
+{
+	unsigned size;
+	u8 tmp;
+
+	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
+		return -EINVAL;
+
+	/* don't use dma for odd-length transfers
+	 * otherwise, we'd need to deal with the last byte with pio
+	 */
+	if (req->req.length & 1)
+		return -EINVAL;
+
+	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
+		ep->ep.name, req, (unsigned long long) req->req.dma);
+
+	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
+
+	/* The NET2272 can only use DMA on one endpoint at a time */
+	if (ep->dev->dma_busy)
+		return -EBUSY;
+
+	/* Make sure we only DMA an even number of bytes (we'll use
+	 * pio to complete the transfer)
+	 */
+	size = req->req.length;
+	size &= ~1;
+
+	/* device-to-host transfer */
+	if (ep->is_in) {
+		/* initialize platform's dma controller */
+		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
+			/* unable to obtain DMA channel; return error and use pio mode */
+			return -EBUSY;
+		req->req.actual += size;
+
+	/* host-to-device transfer */
+	} else {
+		tmp = net2272_ep_read(ep, EP_STAT0);
+
+		/* initialize platform's dma controller */
+		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
+			/* unable to obtain DMA channel; return error and use pio mode */
+			return -EBUSY;
+
+		if (!(tmp & (1 << BUFFER_EMPTY)))
+			ep->not_empty = 1;
+		else
+			ep->not_empty = 0;
+
+
+		/* allow the endpoint's buffer to fill */
+		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
+
+		/* a short packet has already been transferred, so the data
+		 * is already in the fifo; return an error so pio gets used.
+		 */
+		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
+
+			/* deassert dreq */
+			net2272_write(ep->dev, DMAREQ,
+				(0 << DMA_BUFFER_VALID) |
+				(0 << DMA_REQUEST_ENABLE) |
+				(1 << DMA_CONTROL_DACK) |
+				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
+				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
+				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
+				((ep->num >> 1) << DMA_ENDPOINT_SELECT));
+
+			return -EBUSY;
+		}
+	}
+
+	/* Don't use per-packet interrupts: use dma interrupts only */
+	net2272_ep_write(ep, EP_IRQENB, 0);
+
+	net2272_start_dma(ep->dev);
+
+	return 0;
+}
+
+static void net2272_cancel_dma(struct net2272 *dev)
+{
+#ifdef CONFIG_PCI
+	switch (dev->dev_id) {
+	case PCI_DEVICE_ID_RDK1:
+		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
+		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
+		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
+		         (1 << CHANNEL_DONE)))
+			continue;	/* wait for dma to stabilize */
+
+		/* dma abort generates an interrupt */
+		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
+			dev->rdk1.plx9054_base_addr + DMACSR0);
+		break;
+	}
+#endif
+
+	dev->dma_busy = 0;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+	struct net2272_request *req;
+	struct net2272_ep *ep;
+	struct net2272 *dev;
+	unsigned long flags;
+	int status = -1;
+	u8 s;
+
+	req = container_of(_req, struct net2272_request, req);
+	if (!_req || !_req->complete || !_req->buf
+			|| !list_empty(&req->queue))
+		return -EINVAL;
+	ep = container_of(_ep, struct net2272_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0))
+		return -EINVAL;
+	dev = ep->dev;
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	/* set up dma mapping in case the caller didn't */
+	if (use_dma && ep->dma) {
+		status = usb_gadget_map_request(&dev->gadget, _req,
+				ep->is_in);
+		if (status)
+			return status;
+	}
+
+	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
+		_ep->name, _req, _req->length, _req->buf,
+		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	/* kickstart this i/o queue? */
+	if (list_empty(&ep->queue) && !ep->stopped) {
+		/* maybe there's no control data, just status ack */
+		if (ep->num == 0 && _req->length == 0) {
+			net2272_done(ep, req, 0);
+			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
+			goto done;
+		}
+
+		/* Return a pending zlp so it doesn't block subsequent packets */
+		s = net2272_ep_read(ep, EP_STAT0);
+		if (s & (1 << BUFFER_EMPTY)) {
+			/* Buffer is empty; check for a blocking zlp and handle it */
+			if ((s & (1 << NAK_OUT_PACKETS)) &&
+			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
+				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
+				/*
+				 * Request is going to terminate with a short packet ...
+				 * hope the client is ready for it!
+				 */
+				status = net2272_read_fifo(ep, req);
+				/* clear short packet naking */
+				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
+				goto done;
+			}
+		}
+
+		/* try dma first */
+		status = net2272_kick_dma(ep, req);
+
+		if (status < 0) {
+			/* dma failed (most likely in use by another endpoint)
+			 * fallback to pio
+			 */
+			status = 0;
+
+			if (ep->is_in)
+				status = net2272_write_fifo(ep, req);
+			else {
+				s = net2272_ep_read(ep, EP_STAT0);
+				if ((s & (1 << BUFFER_EMPTY)) == 0)
+					status = net2272_read_fifo(ep, req);
+			}
+
+			if (unlikely(status != 0)) {
+				if (status > 0)
+					status = 0;
+				req = NULL;
+			}
+		}
+	}
+	if (likely(req != 0))
+		list_add_tail(&req->queue, &ep->queue);
+
+	if (likely(!list_empty(&ep->queue)))
+		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
+ done:
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+/* dequeue ALL requests */
+static void
+net2272_dequeue_all(struct net2272_ep *ep)
+{
+	struct net2272_request *req;
+
+	/* called with spinlock held */
+	ep->stopped = 1;
+
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next,
+				struct net2272_request,
+				queue);
+		net2272_done(ep, req, -ESHUTDOWN);
+	}
+}
+
+/* dequeue JUST ONE request */
+static int
+net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct net2272_ep *ep;
+	struct net2272_request *req;
+	unsigned long flags;
+	int stopped;
+
+	ep = container_of(_ep, struct net2272_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->dev->lock, flags);
+	stopped = ep->stopped;
+	ep->stopped = 1;
+
+	/* make sure it's still queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		spin_unlock_irqrestore(&ep->dev->lock, flags);
+		return -EINVAL;
+	}
+
+	/* queue head may be partially complete */
+	if (ep->queue.next == &req->queue) {
+		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
+		net2272_done(ep, req, -ECONNRESET);
+	}
+	req = NULL;
+	ep->stopped = stopped;
+
+	spin_unlock_irqrestore(&ep->dev->lock, flags);
+	return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
+{
+	struct net2272_ep *ep;
+	unsigned long flags;
+	int ret = 0;
+
+	ep = container_of(_ep, struct net2272_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0))
+		return -EINVAL;
+	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->dev->lock, flags);
+	if (!list_empty(&ep->queue))
+		ret = -EAGAIN;
+	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
+		ret = -EAGAIN;
+	else {
+		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
+			value ? "set" : "clear",
+			wedged ? "wedge" : "halt");
+		/* set/clear */
+		if (value) {
+			if (ep->num == 0)
+				ep->dev->protocol_stall = 1;
+			else
+				set_halt(ep);
+			if (wedged)
+				ep->wedged = 1;
+		} else {
+			clear_halt(ep);
+			ep->wedged = 0;
+		}
+	}
+	spin_unlock_irqrestore(&ep->dev->lock, flags);
+
+	return ret;
+}
+
+static int
+net2272_set_halt(struct usb_ep *_ep, int value)
+{
+	return net2272_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int
+net2272_set_wedge(struct usb_ep *_ep)
+{
+	if (!_ep || _ep->name == ep0name)
+		return -EINVAL;
+	return net2272_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static int
+net2272_fifo_status(struct usb_ep *_ep)
+{
+	struct net2272_ep *ep;
+	u16 avail;
+
+	ep = container_of(_ep, struct net2272_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0))
+		return -ENODEV;
+	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
+	avail |= net2272_ep_read(ep, EP_AVAIL0);
+	if (avail > ep->fifo_size)
+		return -EOVERFLOW;
+	if (ep->is_in)
+		avail = ep->fifo_size - avail;
+	return avail;
+}
+
+static void
+net2272_fifo_flush(struct usb_ep *_ep)
+{
+	struct net2272_ep *ep;
+
+	ep = container_of(_ep, struct net2272_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0))
+		return;
+	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return;
+
+	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
+}
+
+static struct usb_ep_ops net2272_ep_ops = {
+	.enable        = net2272_enable,
+	.disable       = net2272_disable,
+
+	.alloc_request = net2272_alloc_request,
+	.free_request  = net2272_free_request,
+
+	.queue         = net2272_queue,
+	.dequeue       = net2272_dequeue,
+
+	.set_halt      = net2272_set_halt,
+	.set_wedge     = net2272_set_wedge,
+	.fifo_status   = net2272_fifo_status,
+	.fifo_flush    = net2272_fifo_flush,
+};
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_get_frame(struct usb_gadget *_gadget)
+{
+	struct net2272 *dev;
+	unsigned long flags;
+	u16 ret;
+
+	if (!_gadget)
+		return -ENODEV;
+	dev = container_of(_gadget, struct net2272, gadget);
+	spin_lock_irqsave(&dev->lock, flags);
+
+	ret = net2272_read(dev, FRAME1) << 8;
+	ret |= net2272_read(dev, FRAME0);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return ret;
+}
+
+static int
+net2272_wakeup(struct usb_gadget *_gadget)
+{
+	struct net2272 *dev;
+	u8 tmp;
+	unsigned long flags;
+
+	if (!_gadget)
+		return 0;
+	dev = container_of(_gadget, struct net2272, gadget);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	tmp = net2272_read(dev, USBCTL0);
+	if (tmp & (1 << IO_WAKEUP_ENABLE))
+		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+static int
+net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
+{
+	struct net2272 *dev;
+
+	if (!_gadget)
+		return -ENODEV;
+	dev = container_of(_gadget, struct net2272, gadget);
+
+	dev->is_selfpowered = value;
+
+	return 0;
+}
+
+static int
+net2272_pullup(struct usb_gadget *_gadget, int is_on)
+{
+	struct net2272 *dev;
+	u8 tmp;
+	unsigned long flags;
+
+	if (!_gadget)
+		return -ENODEV;
+	dev = container_of(_gadget, struct net2272, gadget);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	tmp = net2272_read(dev, USBCTL0);
+	dev->softconnect = (is_on != 0);
+	if (is_on)
+		tmp |= (1 << USB_DETECT_ENABLE);
+	else
+		tmp &= ~(1 << USB_DETECT_ENABLE);
+	net2272_write(dev, USBCTL0, tmp);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+static int net2272_start(struct usb_gadget *_gadget,
+		struct usb_gadget_driver *driver);
+static int net2272_stop(struct usb_gadget *_gadget,
+		struct usb_gadget_driver *driver);
+
+static const struct usb_gadget_ops net2272_ops = {
+	.get_frame	= net2272_get_frame,
+	.wakeup		= net2272_wakeup,
+	.set_selfpowered = net2272_set_selfpowered,
+	.pullup		= net2272_pullup,
+	.udc_start	= net2272_start,
+	.udc_stop	= net2272_stop,
+};
+
+/*---------------------------------------------------------------------------*/
+
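+/*
+ * Dump the main control, DMA, USB and per-endpoint registers; this is
+ * made available read-only through the "registers" device attribute
+ * defined below.
+ */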
+static ssize_t
+net2272_show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	struct net2272 *dev;
+	char *next;
+	unsigned size, t;
+	unsigned long flags;
+	u8 t1, t2;
+	int i;
+	const char *s;
+
+	dev = dev_get_drvdata(_dev);
+	next = buf;
+	size = PAGE_SIZE;
+	spin_lock_irqsave(&dev->lock, flags);
+
+	if (dev->driver)
+		s = dev->driver->driver.name;
+	else
+		s = "(none)";
+
+	/* Main Control Registers */
+	t = scnprintf(next, size, "%s version %s,"
+		"chiprev %02x, locctl %02x\n"
+		"irqenb0 %02x irqenb1 %02x "
+		"irqstat0 %02x irqstat1 %02x\n",
+		driver_name, driver_vers, dev->chiprev,
+		net2272_read(dev, LOCCTL),
+		net2272_read(dev, IRQENB0),
+		net2272_read(dev, IRQENB1),
+		net2272_read(dev, IRQSTAT0),
+		net2272_read(dev, IRQSTAT1));
+	size -= t;
+	next += t;
+
+	/* DMA */
+	t1 = net2272_read(dev, DMAREQ);
+	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
+		t1, ep_name[(t1 & 0x01) + 1],
+		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
+		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
+		t1 & (1 << DMA_REQUEST) ? "req " : "",
+		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
+	size -= t;
+	next += t;
+
+	/* USB Control Registers */
+	t1 = net2272_read(dev, USBCTL1);
+	if (t1 & (1 << VBUS_PIN)) {
+		if (t1 & (1 << USB_HIGH_SPEED))
+			s = "high speed";
+		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+			s = "powered";
+		else
+			s = "full speed";
+	} else
+		s = "not attached";
+	t = scnprintf(next, size,
+		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
+		net2272_read(dev, USBCTL0), t1,
+		net2272_read(dev, OURADDR), s);
+	size -= t;
+	next += t;
+
+	/* Endpoint Registers */
+	for (i = 0; i < 4; ++i) {
+		struct net2272_ep *ep;
+
+		ep = &dev->ep[i];
+		if (i && !ep->desc)
+			continue;
+
+		t1 = net2272_ep_read(ep, EP_CFG);
+		t2 = net2272_ep_read(ep, EP_RSPSET);
+		t = scnprintf(next, size,
+			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
+			"irqenb %02x\n",
+			ep->ep.name, t1, t2,
+			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
+			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
+			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
+			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
+			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
+			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
+			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
+			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
+			net2272_ep_read(ep, EP_IRQENB));
+		size -= t;
+		next += t;
+
+		t = scnprintf(next, size,
+			"\tstat0 %02x stat1 %02x avail %04x "
+			"(ep%d%s-%s)%s\n",
+			net2272_ep_read(ep, EP_STAT0),
+			net2272_ep_read(ep, EP_STAT1),
+			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
+			t1 & 0x0f,
+			ep->is_in ? "in" : "out",
+			type_string(t1 >> 5),
+			ep->stopped ? "*" : "");
+		size -= t;
+		next += t;
+
+		t = scnprintf(next, size,
+			"\tep_transfer %06x\n",
+			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
+			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
+			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
+		size -= t;
+		next += t;
+
+		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
+		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
+		t = scnprintf(next, size,
+			"\tbuf-a %s buf-b %s\n",
+			buf_state_string(t1),
+			buf_state_string(t2));
+		size -= t;
+		next += t;
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(registers, S_IRUGO, net2272_show_registers, NULL);
+
+/*---------------------------------------------------------------------------*/
+
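+/*
+ * Program the fifo buffer configuration (the top two bits of LOCCTL) and
+ * rebuild the gadget's endpoint list with the fifo sizes that correspond
+ * to the selected mode (see the fifo_mode description above).
+ */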
+static void
+net2272_set_fifo_mode(struct net2272 *dev, int mode)
+{
+	u8 tmp;
+
+	tmp = net2272_read(dev, LOCCTL) & 0x3f;
+	tmp |= (mode << 6);
+	net2272_write(dev, LOCCTL, tmp);
+
+	INIT_LIST_HEAD(&dev->gadget.ep_list);
+
+	/* always ep-a, ep-c ... maybe not ep-b */
+	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
+
+	switch (mode) {
+	case 0:
+		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
+		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
+		break;
+	case 1:
+		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
+		dev->ep[1].fifo_size = 1024;
+		dev->ep[2].fifo_size = 512;
+		break;
+	case 2:
+		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
+		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
+		break;
+	case 3:
+		dev->ep[1].fifo_size = 1024;
+		break;
+	}
+
+	/* ep-c always has two 512 byte buffers */
+	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
+	dev->ep[3].fifo_size = 512;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static void
+net2272_usb_reset(struct net2272 *dev)
+{
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+	net2272_cancel_dma(dev);
+
+	net2272_write(dev, IRQENB0, 0);
+	net2272_write(dev, IRQENB1, 0);
+
+	/* clear irq state */
+	net2272_write(dev, IRQSTAT0, 0xff);
+	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
+
+	net2272_write(dev, DMAREQ,
+		(0 << DMA_BUFFER_VALID) |
+		(0 << DMA_REQUEST_ENABLE) |
+		(1 << DMA_CONTROL_DACK) |
+		(dev->dma_eot_polarity << EOT_POLARITY) |
+		(dev->dma_dack_polarity << DACK_POLARITY) |
+		(dev->dma_dreq_polarity << DREQ_POLARITY) |
+		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
+
+	net2272_cancel_dma(dev);
+	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
+
+	/* Set the NET2272 ep fifo data width to 16-bit mode.
+	 * Note that the higher level gadget drivers are expected to convert
+	 * data to little endian; if your local bus/cpu needs byte swapping,
+	 * enable it by also setting BYTE_SWAP in LOCCTL here.
+	 */
+	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
+	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
+}
+
+static void
+net2272_usb_reinit(struct net2272 *dev)
+{
+	int i;
+
+	/* basic endpoint init */
+	for (i = 0; i < 4; ++i) {
+		struct net2272_ep *ep = &dev->ep[i];
+
+		ep->ep.name = ep_name[i];
+		ep->dev = dev;
+		ep->num = i;
+		ep->not_empty = 0;
+
+		if (use_dma && ep->num == dma_ep)
+			ep->dma = 1;
+
+		if (i > 0 && i <= 3)
+			ep->fifo_size = 512;
+		else
+			ep->fifo_size = 64;
+		net2272_ep_reset(ep);
+	}
+	dev->ep[0].ep.maxpacket = 64;
+
+	dev->gadget.ep0 = &dev->ep[0].ep;
+	dev->ep[0].stopped = 0;
+	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+}
+
+static void
+net2272_ep0_start(struct net2272 *dev)
+{
+	struct net2272_ep *ep0 = &dev->ep[0];
+
+	net2272_ep_write(ep0, EP_RSPSET,
+		(1 << NAK_OUT_PACKETS_MODE) |
+		(1 << ALT_NAK_OUT_PACKETS));
+	net2272_ep_write(ep0, EP_RSPCLR,
+		(1 << HIDE_STATUS_PHASE) |
+		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
+	net2272_write(dev, USBCTL0,
+		(dev->softconnect << USB_DETECT_ENABLE) |
+		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
+		(1 << IO_WAKEUP_ENABLE));
+	net2272_write(dev, IRQENB0,
+		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
+		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
+		(1 << DMA_DONE_INTERRUPT_ENABLE));
+	net2272_write(dev, IRQENB1,
+		(1 << VBUS_INTERRUPT_ENABLE) |
+		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
+		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
+}
+
+/* when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests.  then usb traffic follows until a
+ * disconnect is reported.  then a host may connect again, or
+ * the driver might get unbound.
+ */
+static int net2272_start(struct usb_gadget *_gadget,
+		struct usb_gadget_driver *driver)
+{
+	struct net2272 *dev;
+	unsigned i;
+
+	if (!driver || !driver->unbind || !driver->setup ||
+	    driver->max_speed != USB_SPEED_HIGH)
+		return -EINVAL;
+
+	dev = container_of(_gadget, struct net2272, gadget);
+
+	for (i = 0; i < 4; ++i)
+		dev->ep[i].irqs = 0;
+	/* hook up the driver ... */
+	dev->softconnect = 1;
+	driver->driver.bus = NULL;
+	dev->driver = driver;
+	dev->gadget.dev.driver = &driver->driver;
+
+	/* ... then enable host detection and ep0; and we're ready
+	 * for set_configuration as well as eventual disconnect.
+	 */
+	net2272_ep0_start(dev);
+
+	dev_dbg(dev->dev, "%s ready\n", driver->driver.name);
+
+	return 0;
+}
+
+static void
+stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
+{
+	int i;
+
+	/* don't disconnect if it's not connected */
+	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+		driver = NULL;
+
+	/* stop hardware; prevent new request submissions;
+	 * and kill any outstanding requests.
+	 */
+	net2272_usb_reset(dev);
+	for (i = 0; i < 4; ++i)
+		net2272_dequeue_all(&dev->ep[i]);
+
+	net2272_usb_reinit(dev);
+}
+
+static int net2272_stop(struct usb_gadget *_gadget,
+		struct usb_gadget_driver *driver)
+{
+	struct net2272 *dev;
+	unsigned long flags;
+
+	dev = container_of(_gadget, struct net2272, gadget);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	stop_activity(dev, driver);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+
+	dev_dbg(dev->dev, "unregistered driver '%s'\n", driver->driver.name);
+	return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+/* handle ep-a/ep-b dma completions */
+static void
+net2272_handle_dma(struct net2272_ep *ep)
+{
+	struct net2272_request *req;
+	unsigned len;
+	int status;
+
+	if (!list_empty(&ep->queue))
+		req = list_entry(ep->queue.next,
+				struct net2272_request, queue);
+	else
+		req = NULL;
+
+	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
+
+	/* Ensure DREQ is de-asserted */
+	net2272_write(ep->dev, DMAREQ,
+		(0 << DMA_BUFFER_VALID)
+	      | (0 << DMA_REQUEST_ENABLE)
+	      | (1 << DMA_CONTROL_DACK)
+	      | (ep->dev->dma_eot_polarity << EOT_POLARITY)
+	      | (ep->dev->dma_dack_polarity << DACK_POLARITY)
+	      | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
+	      | ((ep->dma >> 1) << DMA_ENDPOINT_SELECT));
+
+	ep->dev->dma_busy = 0;
+
+	net2272_ep_write(ep, EP_IRQENB,
+		  (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
+		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
+		| net2272_ep_read(ep, EP_IRQENB));
+
+	/* device-to-host transfer completed */
+	if (ep->is_in) {
+		/* validate a short packet or zlp if necessary */
+		if ((req->req.length % ep->ep.maxpacket != 0) ||
+				req->req.zero)
+			set_fifo_bytecount(ep, 0);
+
+		net2272_done(ep, req, 0);
+		if (!list_empty(&ep->queue)) {
+			req = list_entry(ep->queue.next,
+					struct net2272_request, queue);
+			status = net2272_kick_dma(ep, req);
+			if (status < 0)
+				net2272_pio_advance(ep);
+		}
+
+	/* host-to-device transfer completed */
+	} else {
+		/* terminated with a short packet? */
+		if (net2272_read(ep->dev, IRQSTAT0) &
+				(1 << DMA_DONE_INTERRUPT)) {
+			/* abort system dma */
+			net2272_cancel_dma(ep->dev);
+		}
+
+		/* EP_TRANSFER will contain the number of bytes
+		 * actually received.
+		 * NOTE: There is no overflow detection on EP_TRANSFER:
+		 * We can't deal with transfers larger than 2^24 bytes!
+		 */
+		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
+			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
+			| (net2272_ep_read(ep, EP_TRANSFER0));
+
+		if (ep->not_empty)
+			len += 4;
+
+		req->req.actual += len;
+
+		/* get any remaining data */
+		net2272_pio_advance(ep);
+	}
+}
+
+/*---------------------------------------------------------------------------*/
+
+static void
+net2272_handle_ep(struct net2272_ep *ep)
+{
+	struct net2272_request *req;
+	u8 stat0, stat1;
+
+	if (!list_empty(&ep->queue))
+		req = list_entry(ep->queue.next,
+			struct net2272_request, queue);
+	else
+		req = NULL;
+
+	/* ack all, and handle what we care about */
+	stat0 = net2272_ep_read(ep, EP_STAT0);
+	stat1 = net2272_ep_read(ep, EP_STAT1);
+	ep->irqs++;
+
+	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
+		ep->ep.name, stat0, stat1, req ? &req->req : NULL);
+
+	net2272_ep_write(ep, EP_STAT0, stat0 &
+		~((1 << NAK_OUT_PACKETS)
+		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
+	net2272_ep_write(ep, EP_STAT1, stat1);
+
+	/* data packet(s) received (in the fifo, OUT)
+	 * direction must be validated, otherwise control read status phase
+	 * could be interpreted as a valid packet
+	 */
+	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
+		net2272_pio_advance(ep);
+	/* data packet(s) transmitted (IN) */
+	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
+		net2272_pio_advance(ep);
+}
+
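+/* Map the endpoint address in a control request's wIndex to our endpoint */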
+static struct net2272_ep *
+net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
+{
+	struct net2272_ep *ep;
+
+	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
+		return &dev->ep[0];
+
+	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+		u8 bEndpointAddress;
+
+		if (!ep->desc)
+			continue;
+		bEndpointAddress = ep->desc->bEndpointAddress;
+		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
+			continue;
+		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
+			return ep;
+	}
+	return NULL;
+}
+
+/*
+ * USB Test Packet:
+ * JKJKJKJK * 9
+ * JJKKJJKK * 8
+ * JJJJKKKK * 8
+ * JJJJJJJKKKKKKK * 8
+ * JJJJJJJK * 8
+ * {JKKKKKKK * 10}, JK
+ */
+static const u8 net2272_test_packet[] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
+	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
+	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
+};
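+
+/*
+ * The bytes above are the data payload of the test packet defined by the
+ * USB 2.0 specification for Test_Packet mode; the SYNC, PID and CRC fields
+ * are expected to be added by the controller hardware.
+ */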
+
+static void
+net2272_set_test_mode(struct net2272 *dev, int mode)
+{
+	int i;
+
+	/* Disable all net2272 interrupts:
+	 * Nothing but a power cycle should stop the test.
+	 */
+	net2272_write(dev, IRQENB0, 0x00);
+	net2272_write(dev, IRQENB1, 0x00);
+
+	/* Force transceiver to high-speed */
+	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
+
+	net2272_write(dev, PAGESEL, 0);
+	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
+	net2272_write(dev, EP_RSPCLR,
+			  (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
+			| (1 << HIDE_STATUS_PHASE));
+	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
+	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
+
+	/* wait for status phase to complete */
+	while (!(net2272_read(dev, EP_STAT0) &
+				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
+		;
+
+	/* Enable test mode */
+	net2272_write(dev, USBTEST, mode);
+
+	/* load test packet */
+	if (mode == TEST_PACKET) {
+		/* switch to 8 bit mode */
+		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
+				~(1 << DATA_WIDTH));
+
+		for (i = 0; i < sizeof(net2272_test_packet); ++i)
+			net2272_write(dev, EP_DATA, net2272_test_packet[i]);
+
+		/* Validate test packet */
+		net2272_write(dev, EP_TRANSFER0, 0);
+	}
+}
+
+static void
+net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
+{
+	struct net2272_ep *ep;
+	u8 num, scratch;
+
+	/* starting a control request? */
+	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
+		union {
+			u8 raw[8];
+			struct usb_ctrlrequest	r;
+		} u;
+		int tmp = 0;
+		struct net2272_request *req;
+
+		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
+			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
+				dev->gadget.speed = USB_SPEED_HIGH;
+			else
+				dev->gadget.speed = USB_SPEED_FULL;
+			dev_dbg(dev->dev, "%s\n",
+				usb_speed_string(dev->gadget.speed));
+		}
+
+		ep = &dev->ep[0];
+		ep->irqs++;
+
+		/* make sure any leftover interrupt state is cleared */
+		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
+		while (!list_empty(&ep->queue)) {
+			req = list_entry(ep->queue.next,
+				struct net2272_request, queue);
+			net2272_done(ep, req,
+				(req->req.actual == req->req.length) ? 0 : -EPROTO);
+		}
+		ep->stopped = 0;
+		dev->protocol_stall = 0;
+		net2272_ep_write(ep, EP_STAT0,
+			    (1 << DATA_IN_TOKEN_INTERRUPT)
+			  | (1 << DATA_OUT_TOKEN_INTERRUPT)
+			  | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
+			  | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+			  | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
+		net2272_ep_write(ep, EP_STAT1,
+			    (1 << TIMEOUT)
+			  | (1 << USB_OUT_ACK_SENT)
+			  | (1 << USB_OUT_NAK_SENT)
+			  | (1 << USB_IN_ACK_RCVD)
+			  | (1 << USB_IN_NAK_SENT)
+			  | (1 << USB_STALL_SENT)
+			  | (1 << LOCAL_OUT_ZLP));
+
+		/*
+		 * Ensure Control Read pre-validation setting is beyond maximum size
+		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
+		 *    an EP0 transfer following the Control Write is a Control Read,
+		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
+		 *    pre-validation count.
+		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
+		 *    the pre-validation count cannot cause an unexpected validation.
+		 */
+		net2272_write(dev, PAGESEL, 0);
+		net2272_write(dev, EP_TRANSFER2, 0xff);
+		net2272_write(dev, EP_TRANSFER1, 0xff);
+		net2272_write(dev, EP_TRANSFER0, 0xff);
+
+		u.raw[0] = net2272_read(dev, SETUP0);
+		u.raw[1] = net2272_read(dev, SETUP1);
+		u.raw[2] = net2272_read(dev, SETUP2);
+		u.raw[3] = net2272_read(dev, SETUP3);
+		u.raw[4] = net2272_read(dev, SETUP4);
+		u.raw[5] = net2272_read(dev, SETUP5);
+		u.raw[6] = net2272_read(dev, SETUP6);
+		u.raw[7] = net2272_read(dev, SETUP7);
+		/*
+		 * If you have a big endian cpu make sure le16_to_cpus
+		 * performs the proper byte swapping here...
+		 */
+		le16_to_cpus(&u.r.wValue);
+		le16_to_cpus(&u.r.wIndex);
+		le16_to_cpus(&u.r.wLength);
+
+		/* ack the irq */
+		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
+		stat ^= (1 << SETUP_PACKET_INTERRUPT);
+
+		/* watch control traffic at the token level, and force
+		 * synchronization before letting the status phase happen.
+		 */
+		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
+		if (ep->is_in) {
+			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
+				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
+				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
+			stop_out_naking(ep);
+		} else
+			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
+				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
+				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
+		net2272_ep_write(ep, EP_IRQENB, scratch);
+
+		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
+			goto delegate;
+		switch (u.r.bRequest) {
+		case USB_REQ_GET_STATUS: {
+			struct net2272_ep *e;
+			u16 status = 0;
+
+			switch (u.r.bRequestType & USB_RECIP_MASK) {
+			case USB_RECIP_ENDPOINT:
+				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
+				if (!e || u.r.wLength > 2)
+					goto do_stall;
+				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
+					status = __constant_cpu_to_le16(1);
+				else
+					status = __constant_cpu_to_le16(0);
+
+				/* don't bother with a request object! */
+				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
+				writew(status, net2272_reg_addr(dev, EP_DATA));
+				set_fifo_bytecount(&dev->ep[0], 0);
+				allow_status(ep);
+				dev_vdbg(dev->dev, "%s stat %02x\n",
+					ep->ep.name, status);
+				goto next_endpoints;
+			case USB_RECIP_DEVICE:
+				if (u.r.wLength > 2)
+					goto do_stall;
+				if (dev->is_selfpowered)
+					status = (1 << USB_DEVICE_SELF_POWERED);
+
+				/* don't bother with a request object! */
+				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
+				writew(status, net2272_reg_addr(dev, EP_DATA));
+				set_fifo_bytecount(&dev->ep[0], 0);
+				allow_status(ep);
+				dev_vdbg(dev->dev, "device stat %02x\n", status);
+				goto next_endpoints;
+			case USB_RECIP_INTERFACE:
+				if (u.r.wLength > 2)
+					goto do_stall;
+
+				/* don't bother with a request object! */
+				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
+				writew(status, net2272_reg_addr(dev, EP_DATA));
+				set_fifo_bytecount(&dev->ep[0], 0);
+				allow_status(ep);
+				dev_vdbg(dev->dev, "interface status %02x\n", status);
+				goto next_endpoints;
+			}
+
+			break;
+		}
+		case USB_REQ_CLEAR_FEATURE: {
+			struct net2272_ep *e;
+
+			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+				goto delegate;
+			if (u.r.wValue != USB_ENDPOINT_HALT ||
+			    u.r.wLength != 0)
+				goto do_stall;
+			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
+			if (!e)
+				goto do_stall;
+			if (e->wedged) {
+				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
+					ep->ep.name);
+			} else {
+				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
+				clear_halt(e);
+			}
+			allow_status(ep);
+			goto next_endpoints;
+		}
+		case USB_REQ_SET_FEATURE: {
+			struct net2272_ep *e;
+
+			if (u.r.bRequestType == USB_RECIP_DEVICE) {
+				if (u.r.wIndex != NORMAL_OPERATION)
+					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
+				allow_status(ep);
+				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
+				goto next_endpoints;
+			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+				goto delegate;
+			if (u.r.wValue != USB_ENDPOINT_HALT ||
+			    u.r.wLength != 0)
+				goto do_stall;
+			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
+			if (!e)
+				goto do_stall;
+			set_halt(e);
+			allow_status(ep);
+			dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
+			goto next_endpoints;
+		}
+		case USB_REQ_SET_ADDRESS: {
+			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
+			allow_status(ep);
+			break;
+		}
+		default:
+ delegate:
+			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
+				"ep_cfg %08x\n",
+				u.r.bRequestType, u.r.bRequest,
+				u.r.wValue, u.r.wIndex,
+				net2272_ep_read(ep, EP_CFG));
+			spin_unlock(&dev->lock);
+			tmp = dev->driver->setup(&dev->gadget, &u.r);
+			spin_lock(&dev->lock);
+		}
+
+		/* stall ep0 on error */
+		if (tmp < 0) {
+ do_stall:
+			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
+				u.r.bRequestType, u.r.bRequest, tmp);
+			dev->protocol_stall = 1;
+		}
+	/* endpoint dma irq? */
+	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
+		net2272_cancel_dma(dev);
+		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
+		stat &= ~(1 << DMA_DONE_INTERRUPT);
+		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
+			? 2 : 1;
+
+		ep = &dev->ep[num];
+		net2272_handle_dma(ep);
+	}
+
+ next_endpoints:
+	/* endpoint data irq? */
+	scratch = stat & 0x0f;
+	stat &= ~0x0f;
+	for (num = 0; scratch; num++) {
+		u8 t;
+
+		/* does this endpoint's FIFO and queue need tending? */
+		t = 1 << num;
+		if ((scratch & t) == 0)
+			continue;
+		scratch ^= t;
+
+		ep = &dev->ep[num];
+		net2272_handle_ep(ep);
+	}
+
+	/* some interrupts we can just ignore */
+	stat &= ~(1 << SOF_INTERRUPT);
+
+	if (stat)
+		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
+}
+
+static void
+net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
+{
+	u8 tmp, mask;
+
+	/* after disconnect there's nothing else to do! */
+	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
+	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
+
+	if (stat & tmp) {
+		net2272_write(dev, IRQSTAT1, tmp);
+		if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
+				((net2272_read(dev, USBCTL1) & mask) == 0))
+			|| ((net2272_read(dev, USBCTL1) & (1 << VBUS_PIN))
+				== 0))
+				&& (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
+			dev_dbg(dev->dev, "disconnect %s\n",
+				dev->driver->driver.name);
+			stop_activity(dev, dev->driver);
+			net2272_ep0_start(dev);
+			return;
+		}
+		stat &= ~tmp;
+
+		if (!stat)
+			return;
+	}
+
+	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
+	if (stat & tmp) {
+		net2272_write(dev, IRQSTAT1, tmp);
+		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
+			if (dev->driver->suspend)
+				dev->driver->suspend(&dev->gadget);
+			if (!enable_suspend) {
+				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
+				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
+			}
+		} else {
+			if (dev->driver->resume)
+				dev->driver->resume(&dev->gadget);
+		}
+		stat &= ~tmp;
+	}
+
+	/* clear any other status/irqs */
+	if (stat)
+		net2272_write(dev, IRQSTAT1, stat);
+
+	/* some status we can just ignore */
+	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
+			| (1 << SUSPEND_REQUEST_INTERRUPT)
+			| (1 << RESUME_INTERRUPT));
+	if (!stat)
+		return;
+	else
+		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
+}
+
+static irqreturn_t net2272_irq(int irq, void *_dev)
+{
+	struct net2272 *dev = _dev;
+#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
+	u32 intcsr;
+#endif
+#if defined(PLX_PCI_RDK)
+	u8 dmareq;
+#endif
+	spin_lock(&dev->lock);
+#if defined(PLX_PCI_RDK)
+	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
+
+	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
+		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
+				dev->rdk1.plx9054_base_addr + INTCSR);
+		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
+		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
+		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
+		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
+			dev->rdk1.plx9054_base_addr + INTCSR);
+	}
+	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
+		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
+				dev->rdk1.plx9054_base_addr + DMACSR0);
+
+		dmareq = net2272_read(dev, DMAREQ);
+		if (dmareq & 0x01)
+			net2272_handle_dma(&dev->ep[2]);
+		else
+			net2272_handle_dma(&dev->ep[1]);
+	}
+#endif
+#if defined(PLX_PCI_RDK2)
+	/* see if PCI int for us by checking irqstat */
+	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
+	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
+		spin_unlock(&dev->lock);
+		return IRQ_NONE;
+	}
+	/* check dma interrupts */
+#endif
+	/* Platform/device interrupt handler */
+#if !defined(PLX_PCI_RDK)
+	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
+	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
+#endif
+	spin_unlock(&dev->lock);
+
+	return IRQ_HANDLED;
+}
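+
+/*
+ * On the PCI RDK boards the NET2272 interrupt is routed through a bridge
+ * (PLX 9054 on RDK1, FPGA on RDK2), so the handler above first inspects
+ * the bridge's interrupt status (and on RDK1 temporarily masks the PCI
+ * interrupt) before reading the chip's own IRQSTAT1/IRQSTAT0; a platform
+ * bus device reads IRQSTAT1/IRQSTAT0 directly.
+ */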
+
+static int net2272_present(struct net2272 *dev)
+{
+	/*
+	 * Quick test to see if CPU can communicate properly with the NET2272.
+	 * Verifies connection using writes and reads to write/read and
+	 * read-only registers.
+	 *
+	 * This routine is strongly recommended, especially during early bring-up
+	 * of new hardware; for designs that do not run Power-On Self Tests
+	 * (POST) it may be discarded (or perhaps minimized).
+	 */
+	unsigned int ii;
+	u8 val, refval;
+
+	/* Verify NET2272 write/read SCRATCH register can write and read */
+	refval = net2272_read(dev, SCRATCH);
+	for (ii = 0; ii < 0x100; ii += 7) {
+		net2272_write(dev, SCRATCH, ii);
+		val = net2272_read(dev, SCRATCH);
+		if (val != ii) {
+			dev_dbg(dev->dev,
+				"%s: write/read SCRATCH register test failed: "
+				"wrote:0x%2.2x, read:0x%2.2x\n",
+				__func__, ii, val);
+			return -EINVAL;
+		}
+	}
+	/* To be nice, we write the original SCRATCH value back: */
+	net2272_write(dev, SCRATCH, refval);
+
+	/* Verify NET2272 CHIPREV register is read-only: */
+	refval = net2272_read(dev, CHIPREV_2272);
+	for (ii = 0; ii < 0x100; ii += 7) {
+		net2272_write(dev, CHIPREV_2272, ii);
+		val = net2272_read(dev, CHIPREV_2272);
+		if (val != refval) {
+			dev_dbg(dev->dev,
+				"%s: write/read CHIPREV register test failed: "
+				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
+				__func__, ii, val, refval);
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * Verify NET2272's "NET2270 legacy revision" register
+	 *  - NET2272 has two revision registers. The NET2270 legacy revision
+	 *    register should read the same value, regardless of the NET2272
+	 *    silicon revision.  The legacy register applies when NET2270
+	 *    firmware is run on the NET2272.
+	 */
+	val = net2272_read(dev, CHIPREV_LEGACY);
+	if (val != NET2270_LEGACY_REV) {
+		/*
+		 * Unexpected legacy revision value
+		 * - Perhaps the chip is a NET2270?
+		 */
+		dev_dbg(dev->dev,
+			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
+			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
+			__func__, NET2270_LEGACY_REV, val);
+		return -EINVAL;
+	}
+
+	/*
+	 * Verify NET2272 silicon revision
+	 *  - This revision register is appropriate for the silicon version
+	 *    of the NET2272
+	 */
+	val = net2272_read(dev, CHIPREV_2272);
+	switch (val) {
+	case CHIPREV_NET2272_R1:
+		/*
+		 * NET2272 Rev 1 has DMA related errata:
+		 *  - Newer silicon (Rev 1A or better) required
+		 */
+		dev_dbg(dev->dev,
+			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
+			__func__);
+		break;
+	case CHIPREV_NET2272_R1A:
+		break;
+	default:
+		/* NET2272 silicon version *may* not work with this firmware */
+		dev_dbg(dev->dev,
+			"%s: unexpected silicon revision register value: "
+			" CHIPREV_2272: 0x%2.2x\n",
+			__func__, val);
+		/*
+		 * Return Success, even though the chip rev is not an expected value
+		 *  - Older, pre-built firmware can attempt to operate on newer silicon
+		 *  - Often, new silicon is perfectly compatible
+		 */
+	}
+
+	/* Success: NET2272 checks out OK */
+	return 0;
+}
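+
+/*
+ * net2272_probe_fin() below runs this check before any other chip setup
+ * and aborts the probe with -ENODEV if the NET2272 does not respond as
+ * expected.
+ */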
+
+static void
+net2272_gadget_release(struct device *_dev)
+{
+	struct net2272 *dev = dev_get_drvdata(_dev);
+	kfree(dev);
+}
+
+/*---------------------------------------------------------------------------*/
+
+static void __devexit
+net2272_remove(struct net2272 *dev)
+{
+	usb_del_gadget_udc(&dev->gadget);
+
+	/* start with the driver above us */
+	if (dev->driver) {
+		/* should have been done already by driver model core */
+		dev_warn(dev->dev, "pci remove, driver '%s' is still registered\n",
+			dev->driver->driver.name);
+		usb_gadget_unregister_driver(dev->driver);
+	}
+
+	free_irq(dev->irq, dev);
+	iounmap(dev->base_addr);
+
+	device_unregister(&dev->gadget.dev);
+	device_remove_file(dev->dev, &dev_attr_registers);
+
+	dev_info(dev->dev, "unbind\n");
+}
+
+static struct net2272 * __devinit
+net2272_probe_init(struct device *dev, unsigned int irq)
+{
+	struct net2272 *ret;
+
+	if (!irq) {
+		dev_dbg(dev, "No IRQ!\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	/* alloc, and start init */
+	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+	if (!ret)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&ret->lock);
+	ret->irq = irq;
+	ret->dev = dev;
+	ret->gadget.ops = &net2272_ops;
+	ret->gadget.max_speed = USB_SPEED_HIGH;
+
+	/* the "gadget" abstracts/virtualizes the controller */
+	dev_set_name(&ret->gadget.dev, "gadget");
+	ret->gadget.dev.parent = dev;
+	ret->gadget.dev.dma_mask = dev->dma_mask;
+	ret->gadget.dev.release = net2272_gadget_release;
+	ret->gadget.name = driver_name;
+
+	return ret;
+}
+
+static int __devinit
+net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
+{
+	int ret;
+
+	/* See if the chip is there */
+	if (net2272_present(dev)) {
+		dev_warn(dev->dev, "2272 not found!\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	net2272_usb_reset(dev);
+	net2272_usb_reinit(dev);
+
+	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
+	if (ret) {
+		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
+		goto err;
+	}
+
+	dev->chiprev = net2272_read(dev, CHIPREV_2272);
+
+	/* done */
+	dev_info(dev->dev, "%s\n", driver_desc);
+	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
+		dev->irq, dev->base_addr, dev->chiprev,
+		dma_mode_string());
+	dev_info(dev->dev, "version: %s\n", driver_vers);
+
+	ret = device_register(&dev->gadget.dev);
+	if (ret)
+		goto err_irq;
+	ret = device_create_file(dev->dev, &dev_attr_registers);
+	if (ret)
+		goto err_dev_reg;
+
+	ret = usb_add_gadget_udc(dev->dev, &dev->gadget);
+	if (ret)
+		goto err_add_udc;
+
+	return 0;
+
+err_add_udc:
+	device_remove_file(dev->dev, &dev_attr_registers);
+ err_dev_reg:
+	device_unregister(&dev->gadget.dev);
+ err_irq:
+	free_irq(dev->irq, dev);
+ err:
+	return ret;
+}
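+
+/*
+ * The error labels above unwind in reverse order of setup: if
+ * usb_add_gadget_udc() fails, the sysfs attribute file is removed, the
+ * gadget device is unregistered and the IRQ is freed before the error
+ * code is returned.
+ */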
+
+#ifdef CONFIG_PCI
+
+/*
+ * wrap this driver around the specified device, but
+ * don't respond over USB until a gadget driver binds to us
+ */
+
+static int __devinit
+net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
+{
+	unsigned long resource, len, tmp;
+	void __iomem *mem_mapped_addr[4];
+	int ret, i;
+
+	/*
+	 * BAR 0 holds PLX 9054 config registers
+	 * BAR 1 is i/o memory; unused here
+	 * BAR 2 holds EPLD config registers
+	 * BAR 3 holds NET2272 registers
+	 */
+
+	/* Find and map all address spaces */
+	for (i = 0; i < 4; ++i) {
+		if (i == 1)
+			continue;	/* BAR1 unused */
+
+		resource = pci_resource_start(pdev, i);
+		len = pci_resource_len(pdev, i);
+
+		if (!request_mem_region(resource, len, driver_name)) {
+			dev_dbg(dev->dev, "controller already in use\n");
+			ret = -EBUSY;
+			goto err;
+		}
+
+		mem_mapped_addr[i] = ioremap_nocache(resource, len);
+		if (mem_mapped_addr[i] == NULL) {
+			release_mem_region(resource, len);
+			dev_dbg(dev->dev, "can't map memory\n");
+			ret = -EFAULT;
+			goto err;
+		}
+	}
+
+	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
+	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
+	dev->base_addr = mem_mapped_addr[3];
+
+	/* Set PLX 9054 bus width (16 bits) */
+	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
+	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
+			dev->rdk1.plx9054_base_addr + LBRD1);
+
+	/* Enable PLX 9054 Interrupts */
+	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
+			(1 << PCI_INTERRUPT_ENABLE) |
+			(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
+			dev->rdk1.plx9054_base_addr + INTCSR);
+
+	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
+			dev->rdk1.plx9054_base_addr + DMACSR0);
+
+	/* reset */
+	writeb((1 << EPLD_DMA_ENABLE) |
+		(1 << DMA_CTL_DACK) |
+		(1 << DMA_TIMEOUT_ENABLE) |
+		(1 << USER) |
+		(0 << MPX_MODE) |
+		(1 << BUSWIDTH) |
+		(1 << NET2272_RESET),
+		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
+
+	mb();
+	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
+		~(1 << NET2272_RESET),
+		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
+	udelay(200);
+
+	return 0;
+
+ err:
+	while (--i >= 0) {
+		if (i == 1)
+			continue;	/* BAR1 unused, never mapped */
+		iounmap(mem_mapped_addr[i]);
+		release_mem_region(pci_resource_start(pdev, i),
+			pci_resource_len(pdev, i));
+	}
+
+	return ret;
+}
+
+static int __devinit
+net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
+{
+	unsigned long resource, len;
+	void __iomem *mem_mapped_addr[2];
+	int ret, i;
+
+	/*
+	 * BAR 0 holds FPGA config registers
+	 * BAR 1 holds NET2272 registers
+	 */
+
+	/* Find and map all address spaces, bar2-3 unused in rdk 2 */
+	for (i = 0; i < 2; ++i) {
+		resource = pci_resource_start(pdev, i);
+		len = pci_resource_len(pdev, i);
+
+		if (!request_mem_region(resource, len, driver_name)) {
+			dev_dbg(dev->dev, "controller already in use\n");
+			ret = -EBUSY;
+			goto err;
+		}
+
+		mem_mapped_addr[i] = ioremap_nocache(resource, len);
+		if (mem_mapped_addr[i] == NULL) {
+			release_mem_region(resource, len);
+			dev_dbg(dev->dev, "can't map memory\n");
+			ret = -EFAULT;
+			goto err;
+		}
+	}
+
+	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
+	dev->base_addr = mem_mapped_addr[1];
+
+	mb();
+	/* Set 2272 bus width (16 bits) and reset */
+	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
+	udelay(200);
+	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
+	/* Print fpga version number */
+	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
+		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
+	/* Enable FPGA Interrupts */
+	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
+
+	return 0;
+
+ err:
+	while (--i >= 0) {
+		iounmap(mem_mapped_addr[i]);
+		release_mem_region(pci_resource_start(pdev, i),
+			pci_resource_len(pdev, i));
+	}
+
+	return ret;
+}
+
+static int __devinit
+net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct net2272 *dev;
+	int ret;
+
+	dev = net2272_probe_init(&pdev->dev, pdev->irq);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+	dev->dev_id = pdev->device;
+
+	if (pci_enable_device(pdev) < 0) {
+		ret = -ENODEV;
+		goto err_free;
+	}
+
+	pci_set_master(pdev);
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
+	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
+	default: BUG();
+	}
+	if (ret)
+		goto err_pci;
+
+	ret = net2272_probe_fin(dev, 0);
+	if (ret)
+		goto err_pci;
+
+	pci_set_drvdata(pdev, dev);
+
+	return 0;
+
+ err_pci:
+	pci_disable_device(pdev);
+ err_free:
+	kfree(dev);
+
+	return ret;
+}
+
+static void __devexit
+net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
+{
+	int i;
+
+	/* disable PLX 9054 interrupts */
+	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
+		~(1 << PCI_INTERRUPT_ENABLE),
+		dev->rdk1.plx9054_base_addr + INTCSR);
+
+	/* clean up resources allocated during probe() */
+	iounmap(dev->rdk1.plx9054_base_addr);
+	iounmap(dev->rdk1.epld_base_addr);
+
+	for (i = 0; i < 4; ++i) {
+		if (i == 1)
+			continue;	/* BAR1 unused */
+		release_mem_region(pci_resource_start(pdev, i),
+			pci_resource_len(pdev, i));
+	}
+}
+
+static void __devexit
+net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
+{
+	int i;
+
+	/* disable fpga interrupts
+	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
+			~(1 << PCI_INTERRUPT_ENABLE),
+			dev->rdk1.plx9054_base_addr + INTCSR);
+	*/
+
+	/* clean up resources allocated during probe() */
+	iounmap(dev->rdk2.fpga_base_addr);
+
+	for (i = 0; i < 2; ++i)
+		release_mem_region(pci_resource_start(pdev, i),
+			pci_resource_len(pdev, i));
+}
+
+static void __devexit
+net2272_pci_remove(struct pci_dev *pdev)
+{
+	struct net2272 *dev = pci_get_drvdata(pdev);
+
+	net2272_remove(dev);
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
+	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
+	default: BUG();
+	}
+
+	pci_disable_device(pdev);
+
+	kfree(dev);
+}
+
+/* Table of matching PCI IDs */
+static struct pci_device_id __devinitdata pci_ids[] = {
+	{	/* RDK 1 card */
+		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
+		.class_mask  = 0,
+		.vendor      = PCI_VENDOR_ID_PLX,
+		.device      = PCI_DEVICE_ID_RDK1,
+		.subvendor   = PCI_ANY_ID,
+		.subdevice   = PCI_ANY_ID,
+	},
+	{	/* RDK 2 card */
+		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
+		.class_mask  = 0,
+		.vendor      = PCI_VENDOR_ID_PLX,
+		.device      = PCI_DEVICE_ID_RDK2,
+		.subvendor   = PCI_ANY_ID,
+		.subdevice   = PCI_ANY_ID,
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver net2272_pci_driver = {
+	.name     = driver_name,
+	.id_table = pci_ids,
+
+	.probe    = net2272_pci_probe,
+	.remove   = __devexit_p(net2272_pci_remove),
+};
+
+static int net2272_pci_register(void)
+{
+	return pci_register_driver(&net2272_pci_driver);
+}
+
+static void net2272_pci_unregister(void)
+{
+	pci_unregister_driver(&net2272_pci_driver);
+}
+
+#else
+static inline int net2272_pci_register(void) { return 0; }
+static inline void net2272_pci_unregister(void) { }
+#endif
+
+/*---------------------------------------------------------------------------*/
+
+static int __devinit
+net2272_plat_probe(struct platform_device *pdev)
+{
+	struct net2272 *dev;
+	int ret;
+	unsigned int irqflags;
+	resource_size_t base, len;
+	struct resource *iomem, *iomem_bus, *irq_res;
+
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
+	if (!irq_res || !iomem) {
+		dev_err(&pdev->dev, "must provide irq/base addr\n");
+		return -EINVAL;
+	}
+
+	dev = net2272_probe_init(&pdev->dev, irq_res->start);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	irqflags = 0;
+	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
+		irqflags |= IRQF_TRIGGER_RISING;
+	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
+		irqflags |= IRQF_TRIGGER_FALLING;
+	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
+		irqflags |= IRQF_TRIGGER_HIGH;
+	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
+		irqflags |= IRQF_TRIGGER_LOW;
+
+	base = iomem->start;
+	len = resource_size(iomem);
+	if (iomem_bus)
+		dev->base_shift = iomem_bus->start;
+
+	if (!request_mem_region(base, len, driver_name)) {
+		dev_dbg(dev->dev, "get request memory region!\n");
+		ret = -EBUSY;
+		goto err;
+	}
+	dev->base_addr = ioremap_nocache(base, len);
+	if (!dev->base_addr) {
+		dev_dbg(dev->dev, "can't map memory\n");
+		ret = -EFAULT;
+		goto err_req;
+	}
+
+	ret = net2272_probe_fin(dev, irqflags);
+	if (ret)
+		goto err_io;
+
+	platform_set_drvdata(pdev, dev);
+	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
+		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
+
+	return 0;
+
+ err_io:
+	iounmap(dev->base_addr);
+ err_req:
+	release_mem_region(base, len);
+ err:
+	return ret;
+}
+
+static int __devexit
+net2272_plat_remove(struct platform_device *pdev)
+{
+	struct net2272 *dev = platform_get_drvdata(pdev);
+
+	net2272_remove(dev);
+
+	release_mem_region(pdev->resource[0].start,
+		resource_size(&pdev->resource[0]));
+
+	kfree(dev);
+
+	return 0;
+}
+
+static struct platform_driver net2272_plat_driver = {
+	.probe   = net2272_plat_probe,
+	.remove  = __devexit_p(net2272_plat_remove),
+	.driver  = {
+		.name  = driver_name,
+		.owner = THIS_MODULE,
+	},
+	/* FIXME .suspend, .resume */
+};
+MODULE_ALIAS("platform:net2272");
+
+static int __init net2272_init(void)
+{
+	int ret;
+
+	ret = net2272_pci_register();
+	if (ret)
+		return ret;
+	ret = platform_driver_register(&net2272_plat_driver);
+	if (ret)
+		goto err_pci;
+	return ret;
+
+err_pci:
+	net2272_pci_unregister();
+	return ret;
+}
+module_init(net2272_init);
+
+static void __exit net2272_cleanup(void)
+{
+	net2272_pci_unregister();
+	platform_driver_unregister(&net2272_plat_driver);
+}
+module_exit(net2272_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("PLX Technology, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/net2272.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/net2272.h
new file mode 100644
index 0000000..e595057
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/net2272.h
@@ -0,0 +1,601 @@
+/*
+ * PLX NET2272 high/full speed USB device controller
+ *
+ * Copyright (C) 2005-2006 PLX Technology, Inc.
+ * Copyright (C) 2006-2011 Analog Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __NET2272_H__
+#define __NET2272_H__
+
+/* Main Registers */
+#define REGADDRPTR			0x00
+#define REGDATA				0x01
+#define IRQSTAT0			0x02
+#define 	ENDPOINT_0_INTERRUPT			0
+#define 	ENDPOINT_A_INTERRUPT			1
+#define 	ENDPOINT_B_INTERRUPT			2
+#define 	ENDPOINT_C_INTERRUPT			3
+#define 	VIRTUALIZED_ENDPOINT_INTERRUPT		4
+#define 	SETUP_PACKET_INTERRUPT			5
+#define 	DMA_DONE_INTERRUPT			6
+#define 	SOF_INTERRUPT				7
+#define IRQSTAT1			0x03
+#define 	CONTROL_STATUS_INTERRUPT		1
+#define 	VBUS_INTERRUPT				2
+#define 	SUSPEND_REQUEST_INTERRUPT		3
+#define 	SUSPEND_REQUEST_CHANGE_INTERRUPT	4
+#define 	RESUME_INTERRUPT			5
+#define 	ROOT_PORT_RESET_INTERRUPT		6
+#define 	RESET_STATUS				7
+#define PAGESEL				0x04
+#define DMAREQ				0x1c
+#define 	DMA_ENDPOINT_SELECT			0
+#define 	DREQ_POLARITY				1
+#define 	DACK_POLARITY				2
+#define 	EOT_POLARITY				3
+#define 	DMA_CONTROL_DACK			4
+#define 	DMA_REQUEST_ENABLE			5
+#define 	DMA_REQUEST				6
+#define 	DMA_BUFFER_VALID			7
+#define SCRATCH				0x1d
+#define IRQENB0				0x20
+#define 	ENDPOINT_0_INTERRUPT_ENABLE		0
+#define 	ENDPOINT_A_INTERRUPT_ENABLE		1
+#define 	ENDPOINT_B_INTERRUPT_ENABLE		2
+#define 	ENDPOINT_C_INTERRUPT_ENABLE		3
+#define 	VIRTUALIZED_ENDPOINT_INTERRUPT_ENABLE	4
+#define 	SETUP_PACKET_INTERRUPT_ENABLE		5
+#define 	DMA_DONE_INTERRUPT_ENABLE		6
+#define 	SOF_INTERRUPT_ENABLE			7
+#define IRQENB1				0x21
+#define 	VBUS_INTERRUPT_ENABLE			2
+#define 	SUSPEND_REQUEST_INTERRUPT_ENABLE	3
+#define 	SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE	4
+#define 	RESUME_INTERRUPT_ENABLE			5
+#define 	ROOT_PORT_RESET_INTERRUPT_ENABLE	6
+#define LOCCTL				0x22
+#define 	DATA_WIDTH				0
+#define 	LOCAL_CLOCK_OUTPUT			1
+#define 		LOCAL_CLOCK_OUTPUT_OFF			0
+#define 		LOCAL_CLOCK_OUTPUT_3_75MHZ		1
+#define 		LOCAL_CLOCK_OUTPUT_7_5MHZ		2
+#define 		LOCAL_CLOCK_OUTPUT_15MHZ		3
+#define 		LOCAL_CLOCK_OUTPUT_30MHZ		4
+#define 		LOCAL_CLOCK_OUTPUT_60MHZ		5
+#define 	DMA_SPLIT_BUS_MODE			4
+#define 	BYTE_SWAP				5
+#define 	BUFFER_CONFIGURATION			6
+#define 		BUFFER_CONFIGURATION_EPA512_EPB512	0
+#define 		BUFFER_CONFIGURATION_EPA1024_EPB512	1
+#define 		BUFFER_CONFIGURATION_EPA1024_EPB1024	2
+#define 		BUFFER_CONFIGURATION_EPA1024DB		3
+#define CHIPREV_LEGACY			0x23
+#define 		NET2270_LEGACY_REV			0x40
+#define LOCCTL1				0x24
+#define 	DMA_MODE				0
+#define 		SLOW_DREQ				0
+#define 		FAST_DREQ				1
+#define 		BURST_MODE				2
+#define 	DMA_DACK_ENABLE				2
+#define CHIPREV_2272			0x25
+#define 		CHIPREV_NET2272_R1			0x10
+#define 		CHIPREV_NET2272_R1A			0x11
+/* USB Registers */
+#define USBCTL0				0x18
+#define 	IO_WAKEUP_ENABLE			1
+#define 	USB_DETECT_ENABLE			3
+#define 	USB_ROOT_PORT_WAKEUP_ENABLE		5
+#define USBCTL1				0x19
+#define 	VBUS_PIN				0
+#define 		USB_FULL_SPEED				1
+#define 		USB_HIGH_SPEED				2
+#define 	GENERATE_RESUME				3
+#define 	VIRTUAL_ENDPOINT_ENABLE			4
+#define FRAME0				0x1a
+#define FRAME1				0x1b
+#define OURADDR				0x30
+#define 	FORCE_IMMEDIATE				7
+#define USBDIAG				0x31
+#define 	FORCE_TRANSMIT_CRC_ERROR		0
+#define 	PREVENT_TRANSMIT_BIT_STUFF		1
+#define 	FORCE_RECEIVE_ERROR			2
+#define 	FAST_TIMES				4
+#define USBTEST				0x32
+#define 	TEST_MODE_SELECT			0
+#define 		NORMAL_OPERATION			0
+#define 		TEST_J					1
+#define 		TEST_K					2
+#define 		TEST_SE0_NAK				3
+#define 		TEST_PACKET				4
+#define 		TEST_FORCE_ENABLE			5
+#define XCVRDIAG			0x33
+#define 	FORCE_FULL_SPEED			2
+#define 	FORCE_HIGH_SPEED			3
+#define 	OPMODE					4
+#define 		NORMAL_OPERATION			0
+#define 		NON_DRIVING				1
+#define 		DISABLE_BITSTUFF_AND_NRZI_ENCODE	2
+#define 	LINESTATE				6
+#define 		SE0_STATE				0
+#define 		J_STATE					1
+#define 		K_STATE					2
+#define 		SE1_STATE				3
+#define VIRTOUT0			0x34
+#define VIRTOUT1			0x35
+#define VIRTIN0				0x36
+#define VIRTIN1				0x37
+#define SETUP0				0x40
+#define SETUP1				0x41
+#define SETUP2				0x42
+#define SETUP3				0x43
+#define SETUP4				0x44
+#define SETUP5				0x45
+#define SETUP6				0x46
+#define SETUP7				0x47
+/* Endpoint Registers (Paged via PAGESEL) */
+#define EP_DATA				0x05
+#define EP_STAT0			0x06
+#define 	DATA_IN_TOKEN_INTERRUPT			0
+#define 	DATA_OUT_TOKEN_INTERRUPT		1
+#define 	DATA_PACKET_TRANSMITTED_INTERRUPT	2
+#define 	DATA_PACKET_RECEIVED_INTERRUPT		3
+#define 	SHORT_PACKET_TRANSFERRED_INTERRUPT	4
+#define 	NAK_OUT_PACKETS				5
+#define 	BUFFER_EMPTY				6
+#define 	BUFFER_FULL				7
+#define EP_STAT1			0x07
+#define 	TIMEOUT					0
+#define 	USB_OUT_ACK_SENT			1
+#define 	USB_OUT_NAK_SENT			2
+#define 	USB_IN_ACK_RCVD				3
+#define 	USB_IN_NAK_SENT				4
+#define 	USB_STALL_SENT				5
+#define 	LOCAL_OUT_ZLP				6
+#define 	BUFFER_FLUSH				7
+#define EP_TRANSFER0			0x08
+#define EP_TRANSFER1			0x09
+#define EP_TRANSFER2			0x0a
+#define EP_IRQENB			0x0b
+#define 	DATA_IN_TOKEN_INTERRUPT_ENABLE		0
+#define 	DATA_OUT_TOKEN_INTERRUPT_ENABLE		1
+#define 	DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE	2
+#define 	DATA_PACKET_RECEIVED_INTERRUPT_ENABLE	3
+#define 	SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE	4
+#define EP_AVAIL0			0x0c
+#define EP_AVAIL1			0x0d
+#define EP_RSPCLR			0x0e
+#define EP_RSPSET			0x0f
+#define 	ENDPOINT_HALT				0
+#define 	ENDPOINT_TOGGLE				1
+#define 	NAK_OUT_PACKETS_MODE			2
+#define 	CONTROL_STATUS_PHASE_HANDSHAKE		3
+#define 	INTERRUPT_MODE				4
+#define 	AUTOVALIDATE				5
+#define 	HIDE_STATUS_PHASE			6
+#define 	ALT_NAK_OUT_PACKETS			7
+#define EP_MAXPKT0			0x28
+#define EP_MAXPKT1			0x29
+#define 	ADDITIONAL_TRANSACTION_OPPORTUNITIES	3
+#define 		NONE_ADDITIONAL_TRANSACTION		0
+#define 		ONE_ADDITIONAL_TRANSACTION		1
+#define 		TWO_ADDITIONAL_TRANSACTION		2
+#define EP_CFG				0x2a
+#define 	ENDPOINT_NUMBER				0
+#define 	ENDPOINT_DIRECTION			4
+#define 	ENDPOINT_TYPE				5
+#define 	ENDPOINT_ENABLE				7
+#define EP_HBW				0x2b
+#define 	HIGH_BANDWIDTH_OUT_TRANSACTION_PID	0
+#define 		DATA0_PID				0
+#define 		DATA1_PID				1
+#define 		DATA2_PID				2
+#define 		MDATA_PID				3
+#define EP_BUFF_STATES			0x2c
+#define 	BUFFER_A_STATE				0
+#define 	BUFFER_B_STATE				2
+#define 		BUFF_FREE				0
+#define 		BUFF_VALID				1
+#define 		BUFF_LCL				2
+#define 		BUFF_USB				3
+
+/*---------------------------------------------------------------------------*/
+
+#define PCI_DEVICE_ID_RDK1	0x9054
+
+/* PCI-RDK EPLD Registers */
+#define RDK_EPLD_IO_REGISTER1		0x00000000
+#define 	RDK_EPLD_USB_RESET				0
+#define 	RDK_EPLD_USB_POWERDOWN				1
+#define 	RDK_EPLD_USB_WAKEUP				2
+#define 	RDK_EPLD_USB_EOT				3
+#define 	RDK_EPLD_DPPULL					4
+#define RDK_EPLD_IO_REGISTER2		0x00000004
+#define 	RDK_EPLD_BUSWIDTH				0
+#define 	RDK_EPLD_USER					2
+#define 	RDK_EPLD_RESET_INTERRUPT_ENABLE			3
+#define 	RDK_EPLD_DMA_TIMEOUT_ENABLE			4
+#define RDK_EPLD_STATUS_REGISTER	0x00000008
+#define 	RDK_EPLD_USB_LRESET				0
+#define RDK_EPLD_REVISION_REGISTER	0x0000000c
+
+/* PCI-RDK PLX 9054 Registers */
+#define INTCSR				0x68
+#define 	PCI_INTERRUPT_ENABLE				8
+#define 	LOCAL_INTERRUPT_INPUT_ENABLE			11
+#define 	LOCAL_INPUT_INTERRUPT_ACTIVE			15
+#define 	LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE		18
+#define 	LOCAL_DMA_CHANNEL_1_INTERRUPT_ENABLE		19
+#define 	DMA_CHANNEL_0_INTERRUPT_ACTIVE			21
+#define 	DMA_CHANNEL_1_INTERRUPT_ACTIVE			22
+#define CNTRL				0x6C
+#define 	RELOAD_CONFIGURATION_REGISTERS			29
+#define 	PCI_ADAPTER_SOFTWARE_RESET			30
+#define DMAMODE0			0x80
+#define 	LOCAL_BUS_WIDTH					0
+#define 	INTERNAL_WAIT_STATES				2
+#define 	TA_READY_INPUT_ENABLE				6
+#define 	LOCAL_BURST_ENABLE				8
+#define 	SCATTER_GATHER_MODE				9
+#define 	DONE_INTERRUPT_ENABLE				10
+#define 	LOCAL_ADDRESSING_MODE				11
+#define 	DEMAND_MODE					12
+#define 	DMA_EOT_ENABLE					14
+#define 	FAST_SLOW_TERMINATE_MODE_SELECT			15
+#define 	DMA_CHANNEL_INTERRUPT_SELECT			17
+#define DMAPADR0			0x84
+#define DMALADR0			0x88
+#define DMASIZ0				0x8c
+#define DMADPR0				0x90
+#define 	DESCRIPTOR_LOCATION				0
+#define 	END_OF_CHAIN					1
+#define 	INTERRUPT_AFTER_TERMINAL_COUNT			2
+#define 	DIRECTION_OF_TRANSFER				3
+#define DMACSR0				0xa8
+#define 	CHANNEL_ENABLE					0
+#define 	CHANNEL_START					1
+#define 	CHANNEL_ABORT					2
+#define 	CHANNEL_CLEAR_INTERRUPT				3
+#define 	CHANNEL_DONE					4
+#define DMATHR				0xb0
+#define LBRD1				0xf8
+#define 	MEMORY_SPACE_LOCAL_BUS_WIDTH			0
+#define 	W8_BIT						0
+#define 	W16_BIT						1
+
+/* Special OR'ing of INTCSR bits */
+#define LOCAL_INTERRUPT_TEST \
+	((1 << LOCAL_INPUT_INTERRUPT_ACTIVE) | \
+	 (1 << LOCAL_INTERRUPT_INPUT_ENABLE))
+
+#define DMA_CHANNEL_0_TEST \
+	((1 << DMA_CHANNEL_0_INTERRUPT_ACTIVE) | \
+	 (1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE))
+
+#define DMA_CHANNEL_1_TEST \
+	((1 << DMA_CHANNEL_1_INTERRUPT_ACTIVE) | \
+	 (1 << LOCAL_DMA_CHANNEL_1_INTERRUPT_ENABLE))
+
+/* EPLD Registers */
+#define RDK_EPLD_IO_REGISTER1			0x00000000
+#define 	RDK_EPLD_USB_RESET			0
+#define 	RDK_EPLD_USB_POWERDOWN			1
+#define 	RDK_EPLD_USB_WAKEUP			2
+#define 	RDK_EPLD_USB_EOT			3
+#define 	RDK_EPLD_DPPULL				4
+#define RDK_EPLD_IO_REGISTER2			0x00000004
+#define 	RDK_EPLD_BUSWIDTH			0
+#define 	RDK_EPLD_USER				2
+#define 	RDK_EPLD_RESET_INTERRUPT_ENABLE		3
+#define 	RDK_EPLD_DMA_TIMEOUT_ENABLE		4
+#define RDK_EPLD_STATUS_REGISTER		0x00000008
+#define 	RDK_EPLD_USB_LRESET			0
+#define RDK_EPLD_REVISION_REGISTER		0x0000000c
+
+#define EPLD_IO_CONTROL_REGISTER		0x400
+#define 	NET2272_RESET				0
+#define 	BUSWIDTH				1
+#define 	MPX_MODE				3
+#define 	USER					4
+#define 	DMA_TIMEOUT_ENABLE			5
+#define 	DMA_CTL_DACK				6
+#define 	EPLD_DMA_ENABLE				7
+#define EPLD_DMA_CONTROL_REGISTER		0x800
+#define 	SPLIT_DMA_MODE				0
+#define 	SPLIT_DMA_DIRECTION			1
+#define 	SPLIT_DMA_ENABLE			2
+#define 	SPLIT_DMA_INTERRUPT_ENABLE		3
+#define 	SPLIT_DMA_INTERRUPT			4
+#define 	EPLD_DMA_MODE				5
+#define 	EPLD_DMA_CONTROLLER_ENABLE		7
+#define SPLIT_DMA_ADDRESS_LOW			0xc00
+#define SPLIT_DMA_ADDRESS_HIGH			0x1000
+#define SPLIT_DMA_BYTE_COUNT_LOW		0x1400
+#define SPLIT_DMA_BYTE_COUNT_HIGH		0x1800
+#define EPLD_REVISION_REGISTER			0x1c00
+#define SPLIT_DMA_RAM				0x4000
+#define DMA_RAM_SIZE				0x1000
+
+/*---------------------------------------------------------------------------*/
+
+#define PCI_DEVICE_ID_RDK2	0x3272
+
+/* PCI-RDK version 2 registers */
+
+/* Main Control Registers */
+
+#define RDK2_IRQENB			0x00
+#define RDK2_IRQSTAT			0x04
+#define 	PB7				23
+#define 	PB6				22
+#define 	PB5				21
+#define 	PB4				20
+#define 	PB3				19
+#define 	PB2				18
+#define 	PB1				17
+#define 	PB0				16
+#define 	GP3				23
+#define 	GP2				23
+#define 	GP1				23
+#define 	GP0				23
+#define 	DMA_RETRY_ABORT			6
+#define 	DMA_PAUSE_DONE			5
+#define 	DMA_ABORT_DONE			4
+#define 	DMA_OUT_FIFO_TRANSFER_DONE	3
+#define 	DMA_LOCAL_DONE			2
+#define 	DMA_PCI_DONE			1
+#define 	NET2272_PCI_IRQ			0
+
+#define RDK2_LOCCTLRDK			0x08
+#define 	CHIP_RESET			3
+#define 	SPLIT_DMA			2
+#define 	MULTIPLEX_MODE			1
+#define 	BUS_WIDTH			0
+
+#define RDK2_GPIOCTL			0x10
+#define 	GP3_OUT_ENABLE					7
+#define 	GP2_OUT_ENABLE					6
+#define 	GP1_OUT_ENABLE					5
+#define 	GP0_OUT_ENABLE					4
+#define 	GP3_DATA					3
+#define 	GP2_DATA					2
+#define 	GP1_DATA					1
+#define 	GP0_DATA					0
+
+#define RDK2_LEDSW			0x14
+#define 	LED3				27
+#define 	LED2				26
+#define 	LED1				25
+#define 	LED0				24
+#define 	PBUTTON				16
+#define 	DIPSW				0
+
+#define RDK2_DIAG			0x18
+#define 	RDK2_FAST_TIMES				2
+#define 	FORCE_PCI_SERR				1
+#define 	FORCE_PCI_INT				0
+#define RDK2_FPGAREV			0x1C
+
+/* Dma Control registers */
+#define RDK2_DMACTL			0x80
+#define 	ADDR_HOLD				24
+#define 	RETRY_COUNT				16	/* 23:16 */
+#define 	FIFO_THRESHOLD				11	/* 15:11 */
+#define 	MEM_WRITE_INVALIDATE			10
+#define 	READ_MULTIPLE				9
+#define 	READ_LINE				8
+#define 	RDK2_DMA_MODE				6	/* 7:6 */
+#define 	CONTROL_DACK				5
+#define 	EOT_ENABLE				4
+#define 	EOT_POLARITY				3
+#define 	DACK_POLARITY				2
+#define 	DREQ_POLARITY				1
+#define 	DMA_ENABLE				0
+
+#define RDK2_DMASTAT			0x84
+#define 	GATHER_COUNT				12	/* 14:12 */
+#define 	FIFO_COUNT				6	/* 11:6 */
+#define 	FIFO_FLUSH				5
+#define 	FIFO_TRANSFER				4
+#define 	PAUSE_DONE				3
+#define 	ABORT_DONE				2
+#define 	DMA_ABORT				1
+#define 	DMA_START				0
+
+#define RDK2_DMAPCICOUNT		0x88
+#define 	DMA_DIRECTION				31
+#define 	DMA_PCI_BYTE_COUNT			0	/* 0:23 */
+
+#define RDK2_DMALOCCOUNT		0x8C	/* 0:23 dma local byte count */
+
+#define RDK2_DMAADDR			0x90	/* 2:31 PCI bus starting address */
+
+/*---------------------------------------------------------------------------*/
+
+#define REG_INDEXED_THRESHOLD	(1 << 5)
+
+/* DRIVER DATA STRUCTURES and UTILITIES */
+struct net2272_ep {
+	struct usb_ep ep;
+	struct net2272 *dev;
+	unsigned long irqs;
+
+	/* analogous to a host-side qh */
+	struct list_head queue;
+	const struct usb_endpoint_descriptor *desc;
+	unsigned num:8,
+	         fifo_size:12,
+	         stopped:1,
+	         wedged:1,
+	         is_in:1,
+	         is_iso:1,
+	         dma:1,
+	         not_empty:1;
+};
+
+struct net2272 {
+	/* each device provides one gadget, several endpoints */
+	struct usb_gadget gadget;
+	struct device *dev;
+	unsigned short dev_id;
+
+	spinlock_t lock;
+	struct net2272_ep ep[4];
+	struct usb_gadget_driver *driver;
+	unsigned protocol_stall:1,
+	         softconnect:1,
+	         is_selfpowered:1,
+	         wakeup:1,
+	         dma_eot_polarity:1,
+	         dma_dack_polarity:1,
+	         dma_dreq_polarity:1,
+	         dma_busy:1;
+	u16 chiprev;
+	u8 pagesel;
+
+	unsigned int irq;
+	unsigned short fifo_mode;
+
+	unsigned int base_shift;
+	u16 __iomem *base_addr;
+	union {
+#ifdef CONFIG_PCI
+		struct {
+			void __iomem *plx9054_base_addr;
+			void __iomem *epld_base_addr;
+		} rdk1;
+		struct {
+			/* BAR 0 is the FPGA, BAR 1 is base_addr; both mem-mapped */
+			void __iomem *fpga_base_addr;
+		} rdk2;
+#endif
+	};
+};
+
+static void __iomem *
+net2272_reg_addr(struct net2272 *dev, unsigned int reg)
+{
+	return dev->base_addr + (reg << dev->base_shift);
+}
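+
+/*
+ * dev->base_addr is a u16-wide pointer, so the register index is scaled
+ * by base_shift (taken from the optional IORESOURCE_BUS resource in
+ * net2272_plat_probe()) and added as a 16-bit offset; with base_shift == 0
+ * the chip's registers appear as consecutive 16-bit locations.
+ */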
+
+static void
+net2272_write(struct net2272 *dev, unsigned int reg, u8 value)
+{
+	if (reg >= REG_INDEXED_THRESHOLD) {
+		/*
+		 * Indexed register; use REGADDRPTR/REGDATA
+		 *  - Save and restore REGADDRPTR. This protects REGADDRPTR from
+		 *    being changed by other code paths, but it is time consuming.
+		 *  - Performance tips: either do not save and restore REGADDRPTR (if it
+		 *    is safe) or do save/restore operations only in critical sections.
+		u8 tmp = readb(dev->base_addr + REGADDRPTR);
+		 */
+		writeb((u8)reg, net2272_reg_addr(dev, REGADDRPTR));
+		writeb(value, net2272_reg_addr(dev, REGDATA));
+		/* writeb(tmp, net2272_reg_addr(dev, REGADDRPTR)); */
+	} else
+		writeb(value, net2272_reg_addr(dev, reg));
+}
+
+static u8
+net2272_read(struct net2272 *dev, unsigned int reg)
+{
+	u8 ret;
+
+	if (reg >= REG_INDEXED_THRESHOLD) {
+		/*
+		 * Indexed register; use REGADDRPTR/REGDATA
+		 *  - Save and restore REGADDRPTR. This protects REGADDRPTR from
+		 *    being changed by other code paths, but it is time consuming.
+		 *  - Performance tips: either do not save and restore REGADDRPTR (if it
+		 *    is safe) or do save/restore operations only in critical sections.
+		u8 tmp = readb(dev->base_addr + REGADDRPTR);
+		 */
+		writeb((u8)reg, net2272_reg_addr(dev, REGADDRPTR));
+		ret = readb(net2272_reg_addr(dev, REGDATA));
+		/* writeb(tmp, net2272_reg_addr(dev, REGADDRPTR)); */
+	} else
+		ret = readb(net2272_reg_addr(dev, reg));
+
+	return ret;
+}
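+
+/*
+ * For example, IRQSTAT0 (0x02) lies below REG_INDEXED_THRESHOLD and is
+ * accessed with a single readb()/writeb(), while OURADDR (0x30) is an
+ * indexed register reached by writing its index to REGADDRPTR and then
+ * accessing REGDATA.
+ */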
+
+static void
+net2272_ep_write(struct net2272_ep *ep, unsigned int reg, u8 value)
+{
+	struct net2272 *dev = ep->dev;
+
+	if (dev->pagesel != ep->num) {
+		net2272_write(dev, PAGESEL, ep->num);
+		dev->pagesel = ep->num;
+	}
+	net2272_write(dev, reg, value);
+}
+
+static u8
+net2272_ep_read(struct net2272_ep *ep, unsigned int reg)
+{
+	struct net2272 *dev = ep->dev;
+
+	if (dev->pagesel != ep->num) {
+		net2272_write(dev, PAGESEL, ep->num);
+		dev->pagesel = ep->num;
+	}
+	return net2272_read(dev, reg);
+}
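+
+/*
+ * Endpoint registers are paged: net2272_ep_read(ep, EP_STAT0), for example,
+ * first selects the endpoint's page by writing ep->num to PAGESEL (skipped
+ * when dev->pagesel already matches) and then reads paged register 0x06.
+ */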
+
+static void allow_status(struct net2272_ep *ep)
+{
+	/* ep0 only */
+	net2272_ep_write(ep, EP_RSPCLR,
+		(1 << CONTROL_STATUS_PHASE_HANDSHAKE) |
+		(1 << ALT_NAK_OUT_PACKETS) |
+		(1 << NAK_OUT_PACKETS_MODE));
+	ep->stopped = 1;
+}
+
+static void set_halt(struct net2272_ep *ep)
+{
+	/* ep0 and bulk/intr endpoints */
+	net2272_ep_write(ep, EP_RSPCLR, 1 << CONTROL_STATUS_PHASE_HANDSHAKE);
+	net2272_ep_write(ep, EP_RSPSET, 1 << ENDPOINT_HALT);
+}
+
+static void clear_halt(struct net2272_ep *ep)
+{
+	/* ep0 and bulk/intr endpoints */
+	net2272_ep_write(ep, EP_RSPCLR,
+		(1 << ENDPOINT_HALT) | (1 << ENDPOINT_TOGGLE));
+}
+
+/* count (<= 4) bytes in the next fifo write will be valid */
+static void set_fifo_bytecount(struct net2272_ep *ep, unsigned count)
+{
+	/* net2272_ep_write will truncate to u8 for us */
+	net2272_ep_write(ep, EP_TRANSFER2, count >> 16);
+	net2272_ep_write(ep, EP_TRANSFER1, count >> 8);
+	net2272_ep_write(ep, EP_TRANSFER0, count);
+}
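+
+/*
+ * e.g. set_fifo_bytecount(ep, 3) writes 0 to EP_TRANSFER2 and EP_TRANSFER1
+ * and 3 to EP_TRANSFER0, so that three bytes of the next FIFO write are
+ * treated as valid.
+ */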
+
+struct net2272_request {
+	struct usb_request req;
+	struct list_head queue;
+	unsigned mapped:1,
+	         valid:1;
+};
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/net2280.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/net2280.c
new file mode 100644
index 0000000..ac335af
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/net2280.c
@@ -0,0 +1,2896 @@
+/*
+ * Driver for the PLX NET2280 USB device controller.
+ * Specs and errata are available from <http://www.plxtech.com>.
+ *
+ * PLX Technology Inc. (formerly NetChip Technology) supported the
+ * development of this driver.
+ *
+ *
+ * CODE STATUS HIGHLIGHTS
+ *
+ * This driver should work well with most "gadget" drivers, including
+ * the File Storage, Serial, and Ethernet/RNDIS gadget drivers
+ * as well as Gadget Zero and Gadgetfs.
+ *
+ * DMA is enabled by default.  Drivers using transfer queues might use
+ * DMA chaining to remove IRQ latencies between transfers.  (Except when
+ * short OUT transfers happen.)  Drivers can use the req->no_interrupt
+ * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
+ * and DMA chaining is enabled.
+ *
+ * Note that almost all the errata workarounds here are only needed for
+ * rev1 chips.  Rev1a silicon (0110) fixes almost all of them.
+ */
+
+/*
+ * Copyright (C) 2003 David Brownell
+ * Copyright (C) 2003-2005 PLX Technology, Inc.
+ *
+ * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
+ *	with 2282 chip
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#undef	DEBUG		/* messages on error and most fault paths */
+#undef	VERBOSE		/* extra debug messages (success too) */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/prefetch.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+
+#define	DRIVER_DESC		"PLX NET228x USB Peripheral Controller"
+#define	DRIVER_VERSION		"2005 Sept 27"
+
+#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
+#define	EP_DONTUSE		13	/* nonzero */
+
+#define USE_RDK_LEDS		/* GPIO pins control three LEDs */
+
+
+static const char driver_name [] = "net2280";
+static const char driver_desc [] = DRIVER_DESC;
+
+static const char ep0name [] = "ep0";
+static const char *const ep_name [] = {
+	ep0name,
+	"ep-a", "ep-b", "ep-c", "ep-d",
+	"ep-e", "ep-f",
+};
+
+/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
+ * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
+ *
+ * The net2280 DMA engines are not tightly integrated with their FIFOs;
+ * not all cases are (yet) handled well in this driver or the silicon.
+ * Some gadget drivers work better with the dma support here than others.
+ * These two parameters let you use PIO or more aggressive DMA.
+ */
+static bool use_dma = 1;
+static bool use_dma_chaining = 0;
+
+/* "modprobe net2280 use_dma=n" etc */
+module_param (use_dma, bool, S_IRUGO);
+module_param (use_dma_chaining, bool, S_IRUGO);
+
+
+/* mode 0 == ep-{a,b,c,d} 1K fifo each
+ * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
+ * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
+ */
+static ushort fifo_mode = 0;
+
+/* "modprobe net2280 fifo_mode=1" etc */
+module_param (fifo_mode, ushort, 0644);
+
+/* enable_suspend -- When enabled, the driver will respond to
+ * USB suspend requests by powering down the NET2280.  Otherwise,
+ * USB suspend requests will be ignored.  This is acceptable for
+ * self-powered devices
+ */
+static bool enable_suspend = 0;
+
+/* "modprobe net2280 enable_suspend=1" etc */
+module_param (enable_suspend, bool, S_IRUGO);
+
+
+#define	DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
+
+#if defined(CONFIG_USB_GADGET_DEBUG_FILES) || defined (DEBUG)
+static char *type_string (u8 bmAttributes)
+{
+	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_BULK:	return "bulk";
+	case USB_ENDPOINT_XFER_ISOC:	return "iso";
+	case USB_ENDPOINT_XFER_INT:	return "intr";
+	}
+	return "control";
+}
+#endif
+
+#include "net2280.h"
+
+#define valid_bit	cpu_to_le32 (1 << VALID_BIT)
+#define dma_done_ie	cpu_to_le32 (1 << DMA_DONE_INTERRUPT_ENABLE)
+
+/*-------------------------------------------------------------------------*/
+
+static int
+net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+	struct net2280		*dev;
+	struct net2280_ep	*ep;
+	u32			max, tmp;
+	unsigned long		flags;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || !desc || ep->desc || _ep->name == ep0name
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+	dev = ep->dev;
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	/* erratum 0119 workaround ties up an endpoint number */
+	if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
+		return -EDOM;
+
+	/* sanity check ep-e/ep-f since their fifos are small */
+	max = usb_endpoint_maxp (desc) & 0x1fff;
+	if (ep->num > 4 && max > 64)
+		return -ERANGE;
+
+	spin_lock_irqsave (&dev->lock, flags);
+	_ep->maxpacket = max & 0x7ff;
+	ep->desc = desc;
+
+	/* ep_reset() has already been called */
+	ep->stopped = 0;
+	ep->wedged = 0;
+	ep->out_overflow = 0;
+
+	/* set speed-dependent max packet; may kick in high bandwidth */
+	set_idx_reg (dev->regs, REG_EP_MAXPKT (dev, ep->num), max);
+
+	/* FIFO lines can't go to different packets.  PIO is ok, so
+	 * use it instead of troublesome (non-bulk) multi-packet DMA.
+	 */
+	if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
+		DEBUG (ep->dev, "%s, no dma for maxpacket %d\n",
+			ep->ep.name, ep->ep.maxpacket);
+		ep->dma = NULL;
+	}
+
+	/* set type, direction, address; reset fifo counters */
+	writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
+	tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
+	if (tmp == USB_ENDPOINT_XFER_INT) {
+		/* erratum 0105 workaround prevents hs NYET */
+		if (dev->chiprev == 0100
+				&& dev->gadget.speed == USB_SPEED_HIGH
+				&& !(desc->bEndpointAddress & USB_DIR_IN))
+			writel ((1 << CLEAR_NAK_OUT_PACKETS_MODE),
+				&ep->regs->ep_rsp);
+	} else if (tmp == USB_ENDPOINT_XFER_BULK) {
+		/* catch some particularly blatant driver bugs */
+		if ((dev->gadget.speed == USB_SPEED_HIGH
+					&& max != 512)
+				|| (dev->gadget.speed == USB_SPEED_FULL
+					&& max > 64)) {
+			spin_unlock_irqrestore (&dev->lock, flags);
+			return -ERANGE;
+		}
+	}
+	ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;
+	tmp <<= ENDPOINT_TYPE;
+	tmp |= desc->bEndpointAddress;
+	tmp |= (4 << ENDPOINT_BYTE_COUNT);	/* default full fifo lines */
+	tmp |= 1 << ENDPOINT_ENABLE;
+	wmb ();
+
+	/* for OUT transfers, block the rx fifo until a read is posted */
+	ep->is_in = (tmp & USB_DIR_IN) != 0;
+	if (!ep->is_in)
+		writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+	else if (dev->pdev->device != 0x2280) {
+		/* Added for the 2282: don't use NAK packets on an IN
+		 * endpoint; this was ignored on the 2280.
+		 */
+		writel ((1 << CLEAR_NAK_OUT_PACKETS)
+			| (1 << CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
+	}
+
+	writel (tmp, &ep->regs->ep_cfg);
+
+	/* enable irqs */
+	if (!ep->dma) {				/* pio, per-packet */
+		tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
+		writel (tmp, &dev->regs->pciirqenb0);
+
+		tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
+			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
+		if (dev->pdev->device == 0x2280)
+			tmp |= readl (&ep->regs->ep_irqenb);
+		writel (tmp, &ep->regs->ep_irqenb);
+	} else {				/* dma, per-request */
+		tmp = (1 << (8 + ep->num));	/* completion */
+		tmp |= readl (&dev->regs->pciirqenb1);
+		writel (tmp, &dev->regs->pciirqenb1);
+
+		/* for short OUT transfers, dma completions can't
+		 * advance the queue; do it pio-style, by hand.
+		 * NOTE erratum 0112 workaround #2
+		 */
+		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
+			tmp = (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
+			writel (tmp, &ep->regs->ep_irqenb);
+
+			tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
+			writel (tmp, &dev->regs->pciirqenb0);
+		}
+	}
+
+	tmp = desc->bEndpointAddress;
+	DEBUG (dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
+		_ep->name, tmp & 0x0f, DIR_STRING (tmp),
+		type_string (desc->bmAttributes),
+		ep->dma ? "dma" : "pio", max);
+
+	/* pci writes may still be posted */
+	spin_unlock_irqrestore (&dev->lock, flags);
+	return 0;
+}
+
+static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec)
+{
+	u32	result;
+
+	do {
+		result = readl (ptr);
+		if (result == ~(u32)0)		/* "device unplugged" */
+			return -ENODEV;
+		result &= mask;
+		if (result == done)
+			return 0;
+		udelay (1);
+		usec--;
+	} while (usec > 0);
+	return -ETIMEDOUT;
+}
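+
+/*
+ * handshake() polls a register until the masked value equals 'done' or the
+ * timeout expires; spin_stop_dma() below uses it to wait up to 50 usec for
+ * DMA_ENABLE to clear, and out_flush() uses it to wait for
+ * USB_OUT_PING_NAK_SENT.
+ */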
+
+static const struct usb_ep_ops net2280_ep_ops;
+
+static void ep_reset (struct net2280_regs __iomem *regs, struct net2280_ep *ep)
+{
+	u32		tmp;
+
+	ep->desc = NULL;
+	INIT_LIST_HEAD (&ep->queue);
+
+	ep->ep.maxpacket = ~0;
+	ep->ep.ops = &net2280_ep_ops;
+
+	/* disable the dma, irqs, endpoint... */
+	if (ep->dma) {
+		writel (0, &ep->dma->dmactl);
+		writel (  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
+			| (1 << DMA_TRANSACTION_DONE_INTERRUPT)
+			| (1 << DMA_ABORT)
+			, &ep->dma->dmastat);
+
+		tmp = readl (&regs->pciirqenb0);
+		tmp &= ~(1 << ep->num);
+		writel (tmp, &regs->pciirqenb0);
+	} else {
+		tmp = readl (&regs->pciirqenb1);
+		tmp &= ~(1 << (8 + ep->num));	/* completion */
+		writel (tmp, &regs->pciirqenb1);
+	}
+	writel (0, &ep->regs->ep_irqenb);
+
+	/* init to our chosen defaults, notably so that we NAK OUT
+	 * packets until the driver queues a read (+note erratum 0112)
+	 */
+	if (!ep->is_in || ep->dev->pdev->device == 0x2280) {
+		tmp = (1 << SET_NAK_OUT_PACKETS_MODE)
+		| (1 << SET_NAK_OUT_PACKETS)
+		| (1 << CLEAR_EP_HIDE_STATUS_PHASE)
+		| (1 << CLEAR_INTERRUPT_MODE);
+	} else {
+		/* added for 2282 */
+		tmp = (1 << CLEAR_NAK_OUT_PACKETS_MODE)
+		| (1 << CLEAR_NAK_OUT_PACKETS)
+		| (1 << CLEAR_EP_HIDE_STATUS_PHASE)
+		| (1 << CLEAR_INTERRUPT_MODE);
+	}
+
+	if (ep->num != 0) {
+		tmp |= (1 << CLEAR_ENDPOINT_TOGGLE)
+			| (1 << CLEAR_ENDPOINT_HALT);
+	}
+	writel (tmp, &ep->regs->ep_rsp);
+
+	/* scrub most status bits, and flush any fifo state */
+	if (ep->dev->pdev->device == 0x2280)
+		tmp = (1 << FIFO_OVERFLOW)
+			| (1 << FIFO_UNDERFLOW);
+	else
+		tmp = 0;
+
+	writel (tmp | (1 << TIMEOUT)
+		| (1 << USB_STALL_SENT)
+		| (1 << USB_IN_NAK_SENT)
+		| (1 << USB_IN_ACK_RCVD)
+		| (1 << USB_OUT_PING_NAK_SENT)
+		| (1 << USB_OUT_ACK_SENT)
+		| (1 << FIFO_FLUSH)
+		| (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
+		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
+		| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
+		| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
+		| (1 << DATA_IN_TOKEN_INTERRUPT)
+		, &ep->regs->ep_stat);
+
+	/* fifo size is handled separately */
+}
+
+static void nuke (struct net2280_ep *);
+
+static int net2280_disable (struct usb_ep *_ep)
+{
+	struct net2280_ep	*ep;
+	unsigned long		flags;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || !ep->desc || _ep->name == ep0name)
+		return -EINVAL;
+
+	spin_lock_irqsave (&ep->dev->lock, flags);
+	nuke (ep);
+	ep_reset (ep->dev->regs, ep);
+
+	VDEBUG (ep->dev, "disabled %s %s\n",
+			ep->dma ? "dma" : "pio", _ep->name);
+
+	/* synch memory views with the device */
+	(void) readl (&ep->regs->ep_cfg);
+
+	if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
+		ep->dma = &ep->dev->dma [ep->num - 1];
+
+	spin_unlock_irqrestore (&ep->dev->lock, flags);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *
+net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	struct net2280_ep	*ep;
+	struct net2280_request	*req;
+
+	if (!_ep)
+		return NULL;
+	ep = container_of (_ep, struct net2280_ep, ep);
+
+	req = kzalloc(sizeof(*req), gfp_flags);
+	if (!req)
+		return NULL;
+
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD (&req->queue);
+
+	/* this dma descriptor may be swapped with the previous dummy */
+	if (ep->dma) {
+		struct net2280_dma	*td;
+
+		td = pci_pool_alloc (ep->dev->requests, gfp_flags,
+				&req->td_dma);
+		if (!td) {
+			kfree (req);
+			return NULL;
+		}
+		td->dmacount = 0;	/* not VALID */
+		td->dmaaddr = cpu_to_le32 (DMA_ADDR_INVALID);
+		td->dmadesc = td->dmaaddr;
+		req->td = td;
+	}
+	return &req->req;
+}
+
+static void
+net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct net2280_ep	*ep;
+	struct net2280_request	*req;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || !_req)
+		return;
+
+	req = container_of (_req, struct net2280_request, req);
+	WARN_ON (!list_empty (&req->queue));
+	if (req->td)
+		pci_pool_free (ep->dev->requests, req->td, req->td_dma);
+	kfree (req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* load a packet into the fifo we use for usb IN transfers.
+ * works for all endpoints.
+ *
+ * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
+ * at a time, but this code is simpler because it knows it only writes
+ * one packet.  ep-a..ep-d should use dma instead.
+ */
+static void
+write_fifo (struct net2280_ep *ep, struct usb_request *req)
+{
+	struct net2280_ep_regs	__iomem *regs = ep->regs;
+	u8			*buf;
+	u32			tmp;
+	unsigned		count, total;
+
+	/* INVARIANT:  fifo is currently empty. (testable) */
+
+	if (req) {
+		buf = req->buf + req->actual;
+		prefetch (buf);
+		total = req->length - req->actual;
+	} else {
+		total = 0;
+		buf = NULL;
+	}
+
+	/* write just one packet at a time */
+	count = ep->ep.maxpacket;
+	if (count > total)	/* min() cannot be used on a bitfield */
+		count = total;
+
+	VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
+			ep->ep.name, count,
+			(count != ep->ep.maxpacket) ? " (short)" : "",
+			req);
+	while (count >= 4) {
+		/* NOTE be careful if you try to align these. fifo lines
+		 * should normally be full (4 bytes) and successive partial
+		 * lines are ok only in certain cases.
+		 */
+		tmp = get_unaligned ((u32 *)buf);
+		cpu_to_le32s (&tmp);
+		writel (tmp, &regs->ep_data);
+		buf += 4;
+		count -= 4;
+	}
+
+	/* last fifo entry is "short" unless we wrote a full packet.
+	 * also explicitly validate last word in (periodic) transfers
+	 * when maxpacket is not a multiple of 4 bytes.
+	 */
+	if (count || total < ep->ep.maxpacket) {
+		tmp = count ? get_unaligned ((u32 *)buf) : count;
+		cpu_to_le32s (&tmp);
+		set_fifo_bytecount (ep, count & 0x03);
+		writel (tmp, &regs->ep_data);
+	}
+
+	/* pci writes may still be posted */
+}
+
+/* work around erratum 0106: PCI and USB race over the OUT fifo.
+ * caller guarantees chiprev 0100, out endpoint is NAKing, and
+ * there's no real data in the fifo.
+ *
+ * NOTE:  also used in cases where that erratum doesn't apply:
+ * where the host wrote "too much" data to us.
+ */
+static void out_flush (struct net2280_ep *ep)
+{
+	u32	__iomem *statp;
+	u32	tmp;
+
+	ASSERT_OUT_NAKING (ep);
+
+	statp = &ep->regs->ep_stat;
+	writel (  (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
+		| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+		, statp);
+	writel ((1 << FIFO_FLUSH), statp);
+	mb ();
+	tmp = readl (statp);
+	if (tmp & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
+			/* high speed did bulk NYET; fifo isn't filling */
+			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
+		unsigned	usec;
+
+		usec = 50;		/* 64 byte bulk/interrupt */
+		handshake (statp, (1 << USB_OUT_PING_NAK_SENT),
+				(1 << USB_OUT_PING_NAK_SENT), usec);
+		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
+	}
+}
+
+/* unload packet(s) from the fifo we use for usb OUT transfers.
+ * returns true iff the request completed, because of short packet
+ * or the request buffer having filled with full packets.
+ *
+ * for ep-a..ep-d this will read multiple packets out when they
+ * have been accepted.
+ */
+static int
+read_fifo (struct net2280_ep *ep, struct net2280_request *req)
+{
+	struct net2280_ep_regs	__iomem *regs = ep->regs;
+	u8			*buf = req->req.buf + req->req.actual;
+	unsigned		count, tmp, is_short;
+	unsigned		cleanup = 0, prevent = 0;
+
+	/* erratum 0106 ... packets coming in during fifo reads might
+	 * be incompletely rejected.  not all cases have workarounds.
+	 */
+	if (ep->dev->chiprev == 0x0100
+			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
+		udelay (1);
+		tmp = readl (&ep->regs->ep_stat);
+		if ((tmp & (1 << NAK_OUT_PACKETS)))
+			cleanup = 1;
+		else if ((tmp & (1 << FIFO_FULL))) {
+			start_out_naking (ep);
+			prevent = 1;
+		}
+		/* else: hope we don't see the problem */
+	}
+
+	/* never overflow the rx buffer. the fifo reads packets until
+	 * it sees a short one; we might not be ready for them all.
+	 */
+	prefetchw (buf);
+	count = readl (&regs->ep_avail);
+	if (unlikely (count == 0)) {
+		udelay (1);
+		tmp = readl (&ep->regs->ep_stat);
+		count = readl (&regs->ep_avail);
+		/* handled that data already? */
+		if (count == 0 && (tmp & (1 << NAK_OUT_PACKETS)) == 0)
+			return 0;
+	}
+
+	tmp = req->req.length - req->req.actual;
+	if (count > tmp) {
+		/* as with DMA, data overflow gets flushed */
+		if ((tmp % ep->ep.maxpacket) != 0) {
+			ERROR (ep->dev,
+				"%s out fifo %d bytes, expected %d\n",
+				ep->ep.name, count, tmp);
+			req->req.status = -EOVERFLOW;
+			cleanup = 1;
+			/* NAK_OUT_PACKETS will be set, so flushing is safe;
+			 * the next read will start with the next packet
+			 */
+		} /* else it's a ZLP, no worries */
+		count = tmp;
+	}
+	req->req.actual += count;
+
+	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);
+
+	VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
+			ep->ep.name, count, is_short ? " (short)" : "",
+			cleanup ? " flush" : "", prevent ? " nak" : "",
+			req, req->req.actual, req->req.length);
+
+	while (count >= 4) {
+		tmp = readl (&regs->ep_data);
+		cpu_to_le32s (&tmp);
+		put_unaligned (tmp, (u32 *)buf);
+		buf += 4;
+		count -= 4;
+	}
+	if (count) {
+		tmp = readl (&regs->ep_data);
+		/* LE conversion is implicit here: */
+		do {
+			*buf++ = (u8) tmp;
+			tmp >>= 8;
+		} while (--count);
+	}
+	if (cleanup)
+		out_flush (ep);
+	if (prevent) {
+		writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+		(void) readl (&ep->regs->ep_rsp);
+	}
+
+	return is_short || ((req->req.actual == req->req.length)
+				&& !req->req.zero);
+}
+
+/* fill out dma descriptor to match a given request */
+static void
+fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
+{
+	struct net2280_dma	*td = req->td;
+	u32			dmacount = req->req.length;
+
+	/* don't let DMA continue after a short OUT packet,
+	 * so overruns can't affect the next transfer.
+	 * in case of overruns on max-size packets, we can't
+	 * stop the fifo from filling but we can flush it.
+	 */
+	if (ep->is_in)
+		dmacount |= (1 << DMA_DIRECTION);
+	if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0)
+			|| ep->dev->pdev->device != 0x2280)
+		dmacount |= (1 << END_OF_CHAIN);
+
+	req->valid = valid;
+	if (valid)
+		dmacount |= (1 << VALID_BIT);
+	if (likely(!req->req.no_interrupt || !use_dma_chaining))
+		dmacount |= (1 << DMA_DONE_INTERRUPT_ENABLE);
+
+	/* td->dmadesc = previously set by caller */
+	td->dmaaddr = cpu_to_le32 (req->req.dma);
+
+	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
+	wmb ();
+	td->dmacount = cpu_to_le32(dmacount);
+}
+
+static const u32 dmactl_default =
+		  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
+		| (1 << DMA_CLEAR_COUNT_ENABLE)
+		/* erratum 0116 workaround part 1 (use POLLING) */
+		| (POLL_100_USEC << DESCRIPTOR_POLLING_RATE)
+		| (1 << DMA_VALID_BIT_POLLING_ENABLE)
+		| (1 << DMA_VALID_BIT_ENABLE)
+		| (1 << DMA_SCATTER_GATHER_ENABLE)
+		/* erratum 0116 workaround part 2 (no AUTOSTART) */
+		| (1 << DMA_ENABLE);
+
+static inline void spin_stop_dma (struct net2280_dma_regs __iomem *dma)
+{
+	handshake (&dma->dmactl, (1 << DMA_ENABLE), 0, 50);
+}
+
+static inline void stop_dma (struct net2280_dma_regs __iomem *dma)
+{
+	writel (readl (&dma->dmactl) & ~(1 << DMA_ENABLE), &dma->dmactl);
+	spin_stop_dma (dma);
+}
+
+static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
+{
+	struct net2280_dma_regs	__iomem *dma = ep->dma;
+	unsigned int tmp = (1 << VALID_BIT) | (ep->is_in << DMA_DIRECTION);
+
+	if (ep->dev->pdev->device != 0x2280)
+		tmp |= (1 << END_OF_CHAIN);
+
+	writel (tmp, &dma->dmacount);
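+	/* writing back the status acks any stale dma status bits */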
+	writel (readl (&dma->dmastat), &dma->dmastat);
+
+	writel (td_dma, &dma->dmadesc);
+	writel (dmactl, &dma->dmactl);
+
+	/* erratum 0116 workaround part 3:  pci arbiter away from net2280 */
+	(void) readl (&ep->dev->pci->pcimstctl);
+
+	writel ((1 << DMA_START), &dma->dmastat);
+
+	if (!ep->is_in)
+		stop_out_naking (ep);
+}
+
+static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
+{
+	u32			tmp;
+	struct net2280_dma_regs	__iomem *dma = ep->dma;
+
+	/* FIXME can't use DMA for ZLPs */
+
+	/* on this path we "know" there's no dma active (yet) */
+	WARN_ON (readl (&dma->dmactl) & (1 << DMA_ENABLE));
+	writel (0, &ep->dma->dmactl);
+
+	/* previous OUT packet might have been short */
+	if (!ep->is_in && ((tmp = readl (&ep->regs->ep_stat))
+				& (1 << NAK_OUT_PACKETS)) != 0) {
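+		/* ack any pending short-packet status before touching the fifo data */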
+		writel ((1 << SHORT_PACKET_TRANSFERRED_INTERRUPT),
+			&ep->regs->ep_stat);
+
+		tmp = readl (&ep->regs->ep_avail);
+		if (tmp) {
+			writel (readl (&dma->dmastat), &dma->dmastat);
+
+			/* transfer all/some fifo data */
+			writel (req->req.dma, &dma->dmaaddr);
+			tmp = min (tmp, req->req.length);
+
+			/* dma irq, faking scatterlist status */
+			req->td->dmacount = cpu_to_le32 (req->req.length - tmp);
+			writel ((1 << DMA_DONE_INTERRUPT_ENABLE)
+				| tmp, &dma->dmacount);
+			req->td->dmadesc = 0;
+			req->valid = 1;
+
+			writel ((1 << DMA_ENABLE), &dma->dmactl);
+			writel ((1 << DMA_START), &dma->dmastat);
+			return;
+		}
+	}
+
+	tmp = dmactl_default;
+
+	/* force packet boundaries between dma requests, but prevent the
+	 * controller from automagically writing a last "short" packet
+	 * (zero length) unless the driver explicitly said to do that.
+	 */
+	if (ep->is_in) {
+		if (likely ((req->req.length % ep->ep.maxpacket) != 0
+				|| req->req.zero)) {
+			tmp |= (1 << DMA_FIFO_VALIDATE);
+			ep->in_fifo_validate = 1;
+		} else
+			ep->in_fifo_validate = 0;
+	}
+
+	/* init req->td, pointing to the current dummy */
+	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
+	fill_dma_desc (ep, req, 1);
+
+	if (!use_dma_chaining)
+		req->td->dmacount |= cpu_to_le32 (1 << END_OF_CHAIN);
+
+	start_queue (ep, tmp, req->td_dma);
+}
+
+static inline void
+queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
+{
+	struct net2280_dma	*end;
+	dma_addr_t		tmp;
+
+	/* swap new dummy for old, link; fill and maybe activate */
+	end = ep->dummy;
+	ep->dummy = req->td;
+	req->td = end;
+
+	tmp = ep->td_dma;
+	ep->td_dma = req->td_dma;
+	req->td_dma = tmp;
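+	/* this request takes over the old dummy descriptor; its own td
+	 * becomes the new, not-yet-valid tail the controller can poll
+	 */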
+
+	end->dmadesc = cpu_to_le32 (ep->td_dma);
+
+	fill_dma_desc (ep, req, valid);
+}
+
+static void
+done (struct net2280_ep *ep, struct net2280_request *req, int status)
+{
+	struct net2280		*dev;
+	unsigned		stopped = ep->stopped;
+
+	list_del_init (&req->queue);
+
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	dev = ep->dev;
+	if (ep->dma)
+		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
+
+	if (status && status != -ESHUTDOWN)
+		VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
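+	/* drop the lock; the completion callback may queue or dequeue */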
+	spin_unlock (&dev->lock);
+	req->req.complete (&ep->ep, &req->req);
+	spin_lock (&dev->lock);
+	ep->stopped = stopped;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+	struct net2280_request	*req;
+	struct net2280_ep	*ep;
+	struct net2280		*dev;
+	unsigned long		flags;
+
+	/* we always require a cpu-view buffer, so that we can
+	 * always use pio (as fallback or whatever).
+	 */
+	req = container_of (_req, struct net2280_request, req);
+	if (!_req || !_req->complete || !_req->buf
+			|| !list_empty (&req->queue))
+		return -EINVAL;
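+	/* the length must fit in the dma byte count field */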
+	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
+		return -EDOM;
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0))
+		return -EINVAL;
+	dev = ep->dev;
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	/* FIXME implement PIO fallback for ZLPs with DMA */
+	if (ep->dma && _req->length == 0)
+		return -EOPNOTSUPP;
+
+	/* set up dma mapping in case the caller didn't */
+	if (ep->dma) {
+		int ret;
+
+		ret = usb_gadget_map_request(&dev->gadget, _req,
+				ep->is_in);
+		if (ret)
+			return ret;
+	}
+
+#if 0
+	VDEBUG (dev, "%s queue req %p, len %d buf %p\n",
+			_ep->name, _req, _req->length, _req->buf);
+#endif
+
+	spin_lock_irqsave (&dev->lock, flags);
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	/* kickstart this i/o queue? */
+	if (list_empty (&ep->queue) && !ep->stopped) {
+		/* use DMA if the endpoint supports it, else pio */
+		if (ep->dma)
+			start_dma (ep, req);
+		else {
+			/* maybe there's no control data, just status ack */
+			if (ep->num == 0 && _req->length == 0) {
+				allow_status (ep);
+				done (ep, req, 0);
+				VDEBUG (dev, "%s status ack\n", ep->ep.name);
+				goto done;
+			}
+
+			/* PIO ... stuff the fifo, or unblock it.  */
+			if (ep->is_in)
+				write_fifo (ep, _req);
+			else if (list_empty (&ep->queue)) {
+				u32	s;
+
+				/* OUT FIFO might have packet(s) buffered */
+				s = readl (&ep->regs->ep_stat);
+				if ((s & (1 << FIFO_EMPTY)) == 0) {
+					/* note:  _req->short_not_ok is
+					 * ignored here since PIO _always_
+					 * stops queue advance here, and
+					 * _req->status doesn't change for
+					 * short reads (only _req->actual)
+					 */
+					if (read_fifo (ep, req)) {
+						done (ep, req, 0);
+						if (ep->num == 0)
+							allow_status (ep);
+						/* don't queue it */
+						req = NULL;
+					} else
+						s = readl (&ep->regs->ep_stat);
+				}
+
+				/* don't NAK, let the fifo fill */
+				if (req && (s & (1 << NAK_OUT_PACKETS)))
+					writel ((1 << CLEAR_NAK_OUT_PACKETS),
+							&ep->regs->ep_rsp);
+			}
+		}
+
+	} else if (ep->dma) {
+		int	valid = 1;
+
+		if (ep->is_in) {
+			int	expect;
+
+			/* preventing magic zlps is per-engine state, not
+			 * per-transfer; irq logic must recover hiccups.
+			 */
+			expect = likely (req->req.zero
+				|| (req->req.length % ep->ep.maxpacket) != 0);
+			if (expect != ep->in_fifo_validate)
+				valid = 0;
+		}
+		queue_dma (ep, req, valid);
+
+	} /* else the irq handler advances the queue. */
+
+	ep->responded = 1;
+	if (req)
+		list_add_tail (&req->queue, &ep->queue);
+done:
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	/* pci writes may still be posted */
+	return 0;
+}
+
+static inline void
+dma_done (
+	struct net2280_ep *ep,
+	struct net2280_request *req,
+	u32 dmacount,
+	int status
+)
+{
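+	/* the low bits of dmacount hold the bytes left untransferred */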
+	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
+	done (ep, req, status);
+}
+
+static void restart_dma (struct net2280_ep *ep);
+
+static void scan_dma_completions (struct net2280_ep *ep)
+{
+	/* only look at descriptors that were "naturally" retired,
+	 * so fifo and list head state won't matter
+	 */
+	while (!list_empty (&ep->queue)) {
+		struct net2280_request	*req;
+		u32			tmp;
+
+		req = list_entry (ep->queue.next,
+				struct net2280_request, queue);
+		if (!req->valid)
+			break;
+		rmb ();
+		tmp = le32_to_cpup (&req->td->dmacount);
+		if ((tmp & (1 << VALID_BIT)) != 0)
+			break;
+
+		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
+		 * cases where DMA must be aborted; this code handles
+		 * all non-abort DMA completions.
+		 */
+		if (unlikely (req->td->dmadesc == 0)) {
+			/* paranoia */
+			tmp = readl (&ep->dma->dmacount);
+			if (tmp & DMA_BYTE_COUNT_MASK)
+				break;
+			/* single transfer mode */
+			dma_done (ep, req, tmp, 0);
+			break;
+		} else if (!ep->is_in
+				&& (req->req.length % ep->ep.maxpacket) != 0) {
+			tmp = readl (&ep->regs->ep_stat);
+
+			/* AVOID TROUBLE HERE by not issuing short reads from
+			 * your gadget driver.  That helps avoid errata 0121,
+			 * 0122, and 0124; not all cases trigger the warning.
+			 */
+			if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
+				WARNING (ep->dev, "%s lost packet sync!\n",
+						ep->ep.name);
+				req->req.status = -EOVERFLOW;
+			} else if ((tmp = readl (&ep->regs->ep_avail)) != 0) {
+				/* fifo gets flushed later */
+				ep->out_overflow = 1;
+				DEBUG (ep->dev, "%s dma, discard %d len %d\n",
+						ep->ep.name, tmp,
+						req->req.length);
+				req->req.status = -EOVERFLOW;
+			}
+		}
+		dma_done (ep, req, tmp, 0);
+	}
+}
+
+static void restart_dma (struct net2280_ep *ep)
+{
+	struct net2280_request	*req;
+	u32			dmactl = dmactl_default;
+
+	if (ep->stopped)
+		return;
+	req = list_entry (ep->queue.next, struct net2280_request, queue);
+
+	if (!use_dma_chaining) {
+		start_dma (ep, req);
+		return;
+	}
+
+	/* the 2280 will be processing the queue unless the queue hiccups
+	 * after the previous transfer:
+	 *  IN:   wanted automagic zlp, head doesn't (or vice versa)
+	 *        DMA_FIFO_VALIDATE doesn't init from dma descriptors.
+	 *  OUT:  was "usb-short", we must restart.
+	 */
+	if (ep->is_in && !req->valid) {
+		struct net2280_request	*entry, *prev = NULL;
+		int			reqmode, done = 0;
+
+		DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
+		ep->in_fifo_validate = likely (req->req.zero
+			|| (req->req.length % ep->ep.maxpacket) != 0);
+		if (ep->in_fifo_validate)
+			dmactl |= (1 << DMA_FIFO_VALIDATE);
+		list_for_each_entry (entry, &ep->queue, queue) {
+			__le32		dmacount;
+
+			if (entry == req)
+				continue;
+			dmacount = entry->td->dmacount;
+			if (!done) {
+				reqmode = likely (entry->req.zero
+					|| (entry->req.length
+						% ep->ep.maxpacket) != 0);
+				if (reqmode == ep->in_fifo_validate) {
+					entry->valid = 1;
+					dmacount |= valid_bit;
+					entry->td->dmacount = dmacount;
+					prev = entry;
+					continue;
+				} else {
+					/* force a hiccup */
+					prev->td->dmacount |= dma_done_ie;
+					done = 1;
+				}
+			}
+
+			/* walk the rest of the queue so unlinks behave */
+			entry->valid = 0;
+			dmacount &= ~valid_bit;
+			entry->td->dmacount = dmacount;
+			prev = entry;
+		}
+	}
+
+	writel (0, &ep->dma->dmactl);
+	start_queue (ep, dmactl, req->td_dma);
+}
+
+static void abort_dma (struct net2280_ep *ep)
+{
+	/* abort the current transfer */
+	if (likely (!list_empty (&ep->queue))) {
+		/* FIXME work around errata 0121, 0122, 0124 */
+		writel ((1 << DMA_ABORT), &ep->dma->dmastat);
+		spin_stop_dma (ep->dma);
+	} else
+		stop_dma (ep->dma);
+	scan_dma_completions (ep);
+}
+
+/* dequeue ALL requests */
+static void nuke (struct net2280_ep *ep)
+{
+	struct net2280_request	*req;
+
+	/* called with spinlock held */
+	ep->stopped = 1;
+	if (ep->dma)
+		abort_dma (ep);
+	while (!list_empty (&ep->queue)) {
+		req = list_entry (ep->queue.next,
+				struct net2280_request,
+				queue);
+		done (ep, req, -ESHUTDOWN);
+	}
+}
+
+/* dequeue JUST ONE request */
+static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct net2280_ep	*ep;
+	struct net2280_request	*req;
+	unsigned long		flags;
+	u32			dmactl;
+	int			stopped;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
+		return -EINVAL;
+
+	spin_lock_irqsave (&ep->dev->lock, flags);
+	stopped = ep->stopped;
+
+	/* quiesce dma while we patch the queue */
+	dmactl = 0;
+	ep->stopped = 1;
+	if (ep->dma) {
+		dmactl = readl (&ep->dma->dmactl);
+		/* WARNING erratum 0127 may kick in ... */
+		stop_dma (ep->dma);
+		scan_dma_completions (ep);
+	}
+
+	/* make sure it's still queued on this endpoint */
+	list_for_each_entry (req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		spin_unlock_irqrestore (&ep->dev->lock, flags);
+		return -EINVAL;
+	}
+
+	/* queue head may be partially complete. */
+	if (ep->queue.next == &req->queue) {
+		if (ep->dma) {
+			DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name);
+			_req->status = -ECONNRESET;
+			abort_dma (ep);
+			if (likely (ep->queue.next == &req->queue)) {
+				// NOTE: misreports single-transfer mode
+				req->td->dmacount = 0;	/* invalidate */
+				dma_done (ep, req,
+					readl (&ep->dma->dmacount),
+					-ECONNRESET);
+			}
+		} else {
+			DEBUG (ep->dev, "unlink (%s) pio\n", _ep->name);
+			done (ep, req, -ECONNRESET);
+		}
+		req = NULL;
+
+	/* patch up hardware chaining data */
+	} else if (ep->dma && use_dma_chaining) {
+		if (req->queue.prev == ep->queue.next) {
+			writel (le32_to_cpu (req->td->dmadesc),
+				&ep->dma->dmadesc);
+			if (req->td->dmacount & dma_done_ie)
+				writel (readl (&ep->dma->dmacount)
+						| le32_to_cpu(dma_done_ie),
+					&ep->dma->dmacount);
+		} else {
+			struct net2280_request	*prev;
+
+			prev = list_entry (req->queue.prev,
+				struct net2280_request, queue);
+			prev->td->dmadesc = req->td->dmadesc;
+			if (req->td->dmacount & dma_done_ie)
+				prev->td->dmacount |= dma_done_ie;
+		}
+	}
+
+	if (req)
+		done (ep, req, -ECONNRESET);
+	ep->stopped = stopped;
+
+	if (ep->dma) {
+		/* turn off dma on inactive queues */
+		if (list_empty (&ep->queue))
+			stop_dma (ep->dma);
+		else if (!ep->stopped) {
+			/* resume current request, or start new one */
+			if (req)
+				writel (dmactl, &ep->dma->dmactl);
+			else
+				start_dma (ep, list_entry (ep->queue.next,
+					struct net2280_request, queue));
+		}
+	}
+
+	spin_unlock_irqrestore (&ep->dev->lock, flags);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int net2280_fifo_status (struct usb_ep *_ep);
+
+static int
+net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
+{
+	struct net2280_ep	*ep;
+	unsigned long		flags;
+	int			retval = 0;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0))
+		return -EINVAL;
+	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
+						== USB_ENDPOINT_XFER_ISOC)
+		return -EINVAL;
+
+	spin_lock_irqsave (&ep->dev->lock, flags);
+	if (!list_empty (&ep->queue))
+		retval = -EAGAIN;
+	else if (ep->is_in && value && net2280_fifo_status (_ep) != 0)
+		retval = -EAGAIN;
+	else {
+		VDEBUG (ep->dev, "%s %s %s\n", _ep->name,
+				value ? "set" : "clear",
+				wedged ? "wedge" : "halt");
+		/* set/clear, then synch memory views with the device */
+		if (value) {
+			if (ep->num == 0)
+				ep->dev->protocol_stall = 1;
+			else
+				set_halt (ep);
+			if (wedged)
+				ep->wedged = 1;
+		} else {
+			clear_halt (ep);
+			ep->wedged = 0;
+		}
+		(void) readl (&ep->regs->ep_rsp);
+	}
+	spin_unlock_irqrestore (&ep->dev->lock, flags);
+
+	return retval;
+}
+
+static int
+net2280_set_halt(struct usb_ep *_ep, int value)
+{
+	return net2280_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int
+net2280_set_wedge(struct usb_ep *_ep)
+{
+	if (!_ep || _ep->name == ep0name)
+		return -EINVAL;
+	return net2280_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static int
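+		/* the 8 setup bytes arrive packed in two 32-bit registers */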
+net2280_fifo_status (struct usb_ep *_ep)
+{
+	struct net2280_ep	*ep;
+	u32			avail;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0))
+		return -ENODEV;
+	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
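+	/* only the low 12 bits of ep_avail are the byte count */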
+	avail = readl (&ep->regs->ep_avail) & ((1 << 12) - 1);
+	if (avail > ep->fifo_size)
+		return -EOVERFLOW;
+	if (ep->is_in)
+		avail = ep->fifo_size - avail;
+	return avail;
+}
+
+static void
+net2280_fifo_flush (struct usb_ep *_ep)
+{
+	struct net2280_ep	*ep;
+
+	ep = container_of (_ep, struct net2280_ep, ep);
+	if (!_ep || (!ep->desc && ep->num != 0))
+		return;
+	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return;
+
+	writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
+	(void) readl (&ep->regs->ep_rsp);
+}
+
+static const struct usb_ep_ops net2280_ep_ops = {
+	.enable		= net2280_enable,
+	.disable	= net2280_disable,
+
+	.alloc_request	= net2280_alloc_request,
+	.free_request	= net2280_free_request,
+
+	.queue		= net2280_queue,
+	.dequeue	= net2280_dequeue,
+
+	.set_halt	= net2280_set_halt,
+	.set_wedge	= net2280_set_wedge,
+	.fifo_status	= net2280_fifo_status,
+	.fifo_flush	= net2280_fifo_flush,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int net2280_get_frame (struct usb_gadget *_gadget)
+{
+	struct net2280		*dev;
+	unsigned long		flags;
+	u16			retval;
+
+	if (!_gadget)
+		return -ENODEV;
+	dev = container_of (_gadget, struct net2280, gadget);
+	spin_lock_irqsave (&dev->lock, flags);
+	retval = get_idx_reg (dev->regs, REG_FRAME) & 0x03ff;
+	spin_unlock_irqrestore (&dev->lock, flags);
+	return retval;
+}
+
+static int net2280_wakeup (struct usb_gadget *_gadget)
+{
+	struct net2280		*dev;
+	u32			tmp;
+	unsigned long		flags;
+
+	if (!_gadget)
+		return 0;
+	dev = container_of (_gadget, struct net2280, gadget);
+
+	spin_lock_irqsave (&dev->lock, flags);
+	tmp = readl (&dev->usb->usbctl);
+	if (tmp & (1 << DEVICE_REMOTE_WAKEUP_ENABLE))
+		writel (1 << GENERATE_RESUME, &dev->usb->usbstat);
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	/* pci writes may still be posted */
+	return 0;
+}
+
+static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
+{
+	struct net2280		*dev;
+	u32			tmp;
+	unsigned long		flags;
+
+	if (!_gadget)
+		return 0;
+	dev = container_of (_gadget, struct net2280, gadget);
+
+	spin_lock_irqsave (&dev->lock, flags);
+	tmp = readl (&dev->usb->usbctl);
+	if (value)
+		tmp |= (1 << SELF_POWERED_STATUS);
+	else
+		tmp &= ~(1 << SELF_POWERED_STATUS);
+	writel (tmp, &dev->usb->usbctl);
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	return 0;
+}
+
+static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
+{
+	struct net2280  *dev;
+	u32             tmp;
+	unsigned long   flags;
+
+	if (!_gadget)
+		return -ENODEV;
+	dev = container_of (_gadget, struct net2280, gadget);
+
+	spin_lock_irqsave (&dev->lock, flags);
+	tmp = readl (&dev->usb->usbctl);
+	dev->softconnect = (is_on != 0);
+	if (is_on)
+		tmp |= (1 << USB_DETECT_ENABLE);
+	else
+		tmp &= ~(1 << USB_DETECT_ENABLE);
+	writel (tmp, &dev->usb->usbctl);
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	return 0;
+}
+
+static int net2280_start(struct usb_gadget *_gadget,
+		struct usb_gadget_driver *driver);
+static int net2280_stop(struct usb_gadget *_gadget,
+		struct usb_gadget_driver *driver);
+
+static const struct usb_gadget_ops net2280_ops = {
+	.get_frame	= net2280_get_frame,
+	.wakeup		= net2280_wakeup,
+	.set_selfpowered = net2280_set_selfpowered,
+	.pullup		= net2280_pullup,
+	.udc_start	= net2280_start,
+	.udc_stop	= net2280_stop,
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef	CONFIG_USB_GADGET_DEBUG_FILES
+
+/* FIXME move these into procfs, and use seq_file.
+ * Sysfs _still_ doesn't behave for arbitrarily sized files,
+ * and also doesn't help products using this with 2.4 kernels.
+ */
+
+/* "function" sysfs attribute */
+static ssize_t
+show_function (struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	struct net2280	*dev = dev_get_drvdata (_dev);
+
+	if (!dev->driver
+			|| !dev->driver->function
+			|| strlen (dev->driver->function) > PAGE_SIZE)
+		return 0;
+	return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
+}
+static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
+
+static ssize_t net2280_show_registers(struct device *_dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct net2280		*dev;
+	char			*next;
+	unsigned		size, t;
+	unsigned long		flags;
+	int			i;
+	u32			t1, t2;
+	const char		*s;
+
+	dev = dev_get_drvdata (_dev);
+	next = buf;
+	size = PAGE_SIZE;
+	spin_lock_irqsave (&dev->lock, flags);
+
+	if (dev->driver)
+		s = dev->driver->driver.name;
+	else
+		s = "(none)";
+
+	/* Main Control Registers */
+	t = scnprintf (next, size, "%s version " DRIVER_VERSION
+			", chiprev %04x, dma %s\n\n"
+			"devinit %03x fifoctl %08x gadget '%s'\n"
+			"pci irqenb0 %02x irqenb1 %08x "
+			"irqstat0 %04x irqstat1 %08x\n",
+			driver_name, dev->chiprev,
+			use_dma
+				? (use_dma_chaining ? "chaining" : "enabled")
+				: "disabled",
+			readl (&dev->regs->devinit),
+			readl (&dev->regs->fifoctl),
+			s,
+			readl (&dev->regs->pciirqenb0),
+			readl (&dev->regs->pciirqenb1),
+			readl (&dev->regs->irqstat0),
+			readl (&dev->regs->irqstat1));
+	size -= t;
+	next += t;
+
+	/* USB Control Registers */
+	t1 = readl (&dev->usb->usbctl);
+	t2 = readl (&dev->usb->usbstat);
+	if (t1 & (1 << VBUS_PIN)) {
+		if (t2 & (1 << HIGH_SPEED))
+			s = "high speed";
+		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+			s = "powered";
+		else
+			s = "full speed";
+		/* full speed bit (6) not working?? */
+	} else
+			s = "not attached";
+	t = scnprintf (next, size,
+			"stdrsp %08x usbctl %08x usbstat %08x "
+				"addr 0x%02x (%s)\n",
+			readl (&dev->usb->stdrsp), t1, t2,
+			readl (&dev->usb->ouraddr), s);
+	size -= t;
+	next += t;
+
+	/* PCI Master Control Registers */
+
+	/* DMA Control Registers */
+
+	/* Configurable EP Control Registers */
+	for (i = 0; i < 7; i++) {
+		struct net2280_ep	*ep;
+
+		ep = &dev->ep [i];
+		if (i && !ep->desc)
+			continue;
+
+		t1 = readl (&ep->regs->ep_cfg);
+		t2 = readl (&ep->regs->ep_rsp) & 0xff;
+		t = scnprintf (next, size,
+				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
+					"irqenb %02x\n",
+				ep->ep.name, t1, t2,
+				(t2 & (1 << CLEAR_NAK_OUT_PACKETS))
+					? "NAK " : "",
+				(t2 & (1 << CLEAR_EP_HIDE_STATUS_PHASE))
+					? "hide " : "",
+				(t2 & (1 << CLEAR_EP_FORCE_CRC_ERROR))
+					? "CRC " : "",
+				(t2 & (1 << CLEAR_INTERRUPT_MODE))
+					? "interrupt " : "",
+				(t2 & (1<<CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
+					? "status " : "",
+				(t2 & (1 << CLEAR_NAK_OUT_PACKETS_MODE))
+					? "NAKmode " : "",
+				(t2 & (1 << CLEAR_ENDPOINT_TOGGLE))
+					? "DATA1 " : "DATA0 ",
+				(t2 & (1 << CLEAR_ENDPOINT_HALT))
+					? "HALT " : "",
+				readl (&ep->regs->ep_irqenb));
+		size -= t;
+		next += t;
+
+		t = scnprintf (next, size,
+				"\tstat %08x avail %04x "
+				"(ep%d%s-%s)%s\n",
+				readl (&ep->regs->ep_stat),
+				readl (&ep->regs->ep_avail),
+				t1 & 0x0f, DIR_STRING (t1),
+				type_string (t1 >> 8),
+				ep->stopped ? "*" : "");
+		size -= t;
+		next += t;
+
+		if (!ep->dma)
+			continue;
+
+		t = scnprintf (next, size,
+				"  dma\tctl %08x stat %08x count %08x\n"
+				"\taddr %08x desc %08x\n",
+				readl (&ep->dma->dmactl),
+				readl (&ep->dma->dmastat),
+				readl (&ep->dma->dmacount),
+				readl (&ep->dma->dmaaddr),
+				readl (&ep->dma->dmadesc));
+		size -= t;
+		next += t;
+
+	}
+
+	/* Indexed Registers */
+		// none yet
+
+	/* Statistics */
+	t = scnprintf (next, size, "\nirqs:  ");
+	size -= t;
+	next += t;
+	for (i = 0; i < 7; i++) {
+		struct net2280_ep	*ep;
+
+		ep = &dev->ep [i];
+		if (i && !ep->irqs)
+			continue;
+		t = scnprintf (next, size, " %s/%lu", ep->ep.name, ep->irqs);
+		size -= t;
+		next += t;
+
+	}
+	t = scnprintf (next, size, "\n");
+	size -= t;
+	next += t;
+
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(registers, S_IRUGO, net2280_show_registers, NULL);
+
+static ssize_t
+show_queues (struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	struct net2280		*dev;
+	char			*next;
+	unsigned		size;
+	unsigned long		flags;
+	int			i;
+
+	dev = dev_get_drvdata (_dev);
+	next = buf;
+	size = PAGE_SIZE;
+	spin_lock_irqsave (&dev->lock, flags);
+
+	for (i = 0; i < 7; i++) {
+		struct net2280_ep		*ep = &dev->ep [i];
+		struct net2280_request		*req;
+		int				t;
+
+		if (i != 0) {
+			const struct usb_endpoint_descriptor	*d;
+
+			d = ep->desc;
+			if (!d)
+				continue;
+			t = d->bEndpointAddress;
+			t = scnprintf (next, size,
+				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
+				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
+				(t & USB_DIR_IN) ? "in" : "out",
+				({ char *val;
+				 switch (d->bmAttributes & 0x03) {
+				 case USB_ENDPOINT_XFER_BULK:
+					val = "bulk"; break;
+				 case USB_ENDPOINT_XFER_INT:
+					val = "intr"; break;
+				 default:
+					val = "iso"; break;
+				 }; val; }),
+				usb_endpoint_maxp (d) & 0x1fff,
+				ep->dma ? "dma" : "pio", ep->fifo_size
+				);
+		} else /* ep0 should only have one transfer queued */
+			t = scnprintf (next, size, "ep0 max 64 pio %s\n",
+					ep->is_in ? "in" : "out");
+		if (t <= 0 || t > size)
+			goto done;
+		size -= t;
+		next += t;
+
+		if (list_empty (&ep->queue)) {
+			t = scnprintf (next, size, "\t(nothing queued)\n");
+			if (t <= 0 || t > size)
+				goto done;
+			size -= t;
+			next += t;
+			continue;
+		}
+		list_for_each_entry (req, &ep->queue, queue) {
+			if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc))
+				t = scnprintf (next, size,
+					"\treq %p len %d/%d "
+					"buf %p (dmacount %08x)\n",
+					&req->req, req->req.actual,
+					req->req.length, req->req.buf,
+					readl (&ep->dma->dmacount));
+			else
+				t = scnprintf (next, size,
+					"\treq %p len %d/%d buf %p\n",
+					&req->req, req->req.actual,
+					req->req.length, req->req.buf);
+			if (t <= 0 || t > size)
+				goto done;
+			size -= t;
+			next += t;
+
+			if (ep->dma) {
+				struct net2280_dma	*td;
+
+				td = req->td;
+				t = scnprintf (next, size, "\t    td %08x "
+					" count %08x buf %08x desc %08x\n",
+					(u32) req->td_dma,
+					le32_to_cpu (td->dmacount),
+					le32_to_cpu (td->dmaaddr),
+					le32_to_cpu (td->dmadesc));
+				if (t <= 0 || t > size)
+					goto done;
+				size -= t;
+				next += t;
+			}
+		}
+	}
+
+done:
+	spin_unlock_irqrestore (&dev->lock, flags);
+	return PAGE_SIZE - size;
+}
+static DEVICE_ATTR (queues, S_IRUGO, show_queues, NULL);
+
+
+#else
+
+#define device_create_file(a,b)	(0)
+#define device_remove_file(a,b)	do { } while (0)
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* another driver-specific mode might be a request type doing dma
+ * to/from another device fifo instead of to/from memory.
+ */
+
+static void set_fifo_mode (struct net2280 *dev, int mode)
+{
+	/* keeping high bits preserves BAR2 */
+	writel ((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
+
+	/* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
+	INIT_LIST_HEAD (&dev->gadget.ep_list);
+	list_add_tail (&dev->ep [1].ep.ep_list, &dev->gadget.ep_list);
+	list_add_tail (&dev->ep [2].ep.ep_list, &dev->gadget.ep_list);
+	switch (mode) {
+	case 0:
+		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
+		list_add_tail (&dev->ep [4].ep.ep_list, &dev->gadget.ep_list);
+		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 1024;
+		break;
+	case 1:
+		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 2048;
+		break;
+	case 2:
+		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
+		dev->ep [1].fifo_size = 2048;
+		dev->ep [2].fifo_size = 1024;
+		break;
+	}
+	/* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
+	list_add_tail (&dev->ep [5].ep.ep_list, &dev->gadget.ep_list);
+	list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list);
+}
+
+/* keeping it simple:
+ * - one bus driver, initted first;
+ * - one function driver, initted second
+ *
+ * most of the work to support multiple net2280 controllers would
+ * be to associate this gadget driver (yes?) with all of them, or
+ * perhaps to bind specific drivers to specific devices.
+ */
+
+static void usb_reset (struct net2280 *dev)
+{
+	u32	tmp;
+
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+	(void) readl (&dev->usb->usbctl);
+
+	net2280_led_init (dev);
+
+	/* disable automatic responses, and irqs */
+	writel (0, &dev->usb->stdrsp);
+	writel (0, &dev->regs->pciirqenb0);
+	writel (0, &dev->regs->pciirqenb1);
+
+	/* clear old dma and irq state */
+	for (tmp = 0; tmp < 4; tmp++) {
+		struct net2280_ep	*ep = &dev->ep [tmp + 1];
+
+		if (ep->dma)
+			abort_dma (ep);
+	}
+	writel (~0, &dev->regs->irqstat0);
+	writel (~(1 << SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1);
+
+	/* reset, and enable pci */
+	tmp = readl (&dev->regs->devinit)
+		| (1 << PCI_ENABLE)
+		| (1 << FIFO_SOFT_RESET)
+		| (1 << USB_SOFT_RESET)
+		| (1 << M8051_RESET);
+	writel (tmp, &dev->regs->devinit);
+
+	/* standard fifo and endpoint allocations */
+	set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0);
+}
+
+static void usb_reinit (struct net2280 *dev)
+{
+	u32	tmp;
+	int	init_dma;
+
+	/* use_dma changes are ignored till next device re-init */
+	init_dma = use_dma;
+
+	/* basic endpoint init */
+	for (tmp = 0; tmp < 7; tmp++) {
+		struct net2280_ep	*ep = &dev->ep [tmp];
+
+		ep->ep.name = ep_name [tmp];
+		ep->dev = dev;
+		ep->num = tmp;
+
+		if (tmp > 0 && tmp <= 4) {
+			ep->fifo_size = 1024;
+			if (init_dma)
+				ep->dma = &dev->dma [tmp - 1];
+		} else
+			ep->fifo_size = 64;
+		ep->regs = &dev->epregs [tmp];
+		ep_reset (dev->regs, ep);
+	}
+	dev->ep [0].ep.maxpacket = 64;
+	dev->ep [5].ep.maxpacket = 64;
+	dev->ep [6].ep.maxpacket = 64;
+
+	dev->gadget.ep0 = &dev->ep [0].ep;
+	dev->ep [0].stopped = 0;
+	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
+
+	/* we want to prevent lowlevel/insecure access from the USB host,
+	 * but erratum 0119 means this enable bit is ignored
+	 */
+	for (tmp = 0; tmp < 5; tmp++)
+		writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg);
+}
+
+static void ep0_start (struct net2280 *dev)
+{
+	writel (  (1 << CLEAR_EP_HIDE_STATUS_PHASE)
+		| (1 << CLEAR_NAK_OUT_PACKETS)
+		| (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
+		, &dev->epregs [0].ep_rsp);
+
+	/*
+	 * hardware optionally handles a bunch of standard requests
+	 * that the API hides from drivers anyway.  have it do so.
+	 * endpoint status/features are handled in software, to
+	 * help pass tests for some dubious behavior.
+	 */
+	writel (  (1 << SET_TEST_MODE)
+		| (1 << SET_ADDRESS)
+		| (1 << DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP)
+		| (1 << GET_DEVICE_STATUS)
+		| (1 << GET_INTERFACE_STATUS)
+		, &dev->usb->stdrsp);
+	writel (  (1 << USB_ROOT_PORT_WAKEUP_ENABLE)
+		| (1 << SELF_POWERED_USB_DEVICE)
+		| (1 << REMOTE_WAKEUP_SUPPORT)
+		| (dev->softconnect << USB_DETECT_ENABLE)
+		| (1 << SELF_POWERED_STATUS)
+		, &dev->usb->usbctl);
+
+	/* enable irqs so we can see ep0 and general operation  */
+	writel (  (1 << SETUP_PACKET_INTERRUPT_ENABLE)
+		| (1 << ENDPOINT_0_INTERRUPT_ENABLE)
+		, &dev->regs->pciirqenb0);
+	writel (  (1 << PCI_INTERRUPT_ENABLE)
+		| (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE)
+		| (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE)
+		| (1 << PCI_RETRY_ABORT_INTERRUPT_ENABLE)
+		| (1 << VBUS_INTERRUPT_ENABLE)
+		| (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE)
+		| (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)
+		, &dev->regs->pciirqenb1);
+
+	/* don't leave any writes posted */
+	(void) readl (&dev->usb->usbctl);
+}
+
+/* when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests.  then usb traffic follows until a
+ * disconnect is reported.  then a host may connect again, or
+ * the driver might get unbound.
+ */
+static int net2280_start(struct usb_gadget *_gadget,
+		struct usb_gadget_driver *driver)
+{
+	struct net2280		*dev;
+	int			retval;
+	unsigned		i;
+
+	/* insist on high speed support from the driver, since
+	 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
+	 * "must not be used in normal operation"
+	 */
+	if (!driver || driver->max_speed < USB_SPEED_HIGH
+			|| !driver->setup)
+		return -EINVAL;
+
+	dev = container_of (_gadget, struct net2280, gadget);
+
+	for (i = 0; i < 7; i++)
+		dev->ep [i].irqs = 0;
+
+	/* hook up the driver ... */
+	dev->softconnect = 1;
+	driver->driver.bus = NULL;
+	dev->driver = driver;
+	dev->gadget.dev.driver = &driver->driver;
+
+	retval = device_create_file (&dev->pdev->dev, &dev_attr_function);
+	if (retval) goto err_unbind;
+	retval = device_create_file (&dev->pdev->dev, &dev_attr_queues);
+	if (retval) goto err_func;
+
+	/* ... then enable host detection and ep0; and we're ready
+	 * for set_configuration as well as eventual disconnect.
+	 */
+	net2280_led_active (dev, 1);
+	ep0_start (dev);
+
+	DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n",
+			driver->driver.name,
+			readl (&dev->usb->usbctl),
+			readl (&dev->usb->stdrsp));
+
+	/* pci writes may still be posted */
+	return 0;
+
+err_func:
+	device_remove_file (&dev->pdev->dev, &dev_attr_function);
+err_unbind:
+	driver->unbind (&dev->gadget);
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+	return retval;
+}
+
+static void
+stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
+{
+	int			i;
+
+	/* don't disconnect if it's not connected */
+	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+		driver = NULL;
+
+	/* stop hardware; prevent new request submissions;
+	 * and kill any outstanding requests.
+	 */
+	usb_reset (dev);
+	for (i = 0; i < 7; i++)
+		nuke (&dev->ep [i]);
+
+	usb_reinit (dev);
+}
+
+static int net2280_stop(struct usb_gadget *_gadget,
+		struct usb_gadget_driver *driver)
+{
+	struct net2280	*dev;
+	unsigned long	flags;
+
+	dev = container_of (_gadget, struct net2280, gadget);
+
+	spin_lock_irqsave (&dev->lock, flags);
+	stop_activity (dev, driver);
+	spin_unlock_irqrestore (&dev->lock, flags);
+
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+
+	net2280_led_active (dev, 0);
+	device_remove_file (&dev->pdev->dev, &dev_attr_function);
+	device_remove_file (&dev->pdev->dev, &dev_attr_queues);
+
+	DEBUG (dev, "unregistered driver '%s'\n", driver->driver.name);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
+ * also works for dma-capable endpoints, in pio mode or just
+ * to manually advance the queue after short OUT transfers.
+ */
+static void handle_ep_small (struct net2280_ep *ep)
+{
+	struct net2280_request	*req;
+	u32			t;
+	/* 0 error, 1 mid-data, 2 done */
+	int			mode = 1;
+
+	if (!list_empty (&ep->queue))
+		req = list_entry (ep->queue.next,
+			struct net2280_request, queue);
+	else
+		req = NULL;
+
+	/* ack all, and handle what we care about */
+	t = readl (&ep->regs->ep_stat);
+	ep->irqs++;
+#if 0
+	VDEBUG (ep->dev, "%s ack ep_stat %08x, req %p\n",
+			ep->ep.name, t, req ? &req->req : 0);
+#endif
+	if (!ep->is_in || ep->dev->pdev->device == 0x2280)
+		writel (t & ~(1 << NAK_OUT_PACKETS), &ep->regs->ep_stat);
+	else
+		/* Added for 2282 */
+		writel (t, &ep->regs->ep_stat);
+
+	/* for ep0, monitor token irqs to catch data stage length errors
+	 * and to synchronize on status.
+	 *
+	 * also, to defer reporting of protocol stalls ... here's where
+	 * data or status first appears; handling stalls here should never
+	 * cause trouble on the host side.
+	 *
+	 * control requests could be slightly faster without token synch for
+	 * status, but status can jam up that way.
+	 */
+	if (unlikely (ep->num == 0)) {
+		if (ep->is_in) {
+			/* status; stop NAKing */
+			if (t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)) {
+				if (ep->dev->protocol_stall) {
+					ep->stopped = 1;
+					set_halt (ep);
+				}
+				if (!req)
+					allow_status (ep);
+				mode = 2;
+			/* reply to extra IN data tokens with a zlp */
+			} else if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
+				if (ep->dev->protocol_stall) {
+					ep->stopped = 1;
+					set_halt (ep);
+					mode = 2;
+				} else if (ep->responded &&
+						!req && !ep->stopped)
+					write_fifo (ep, NULL);
+			}
+		} else {
+			/* status; stop NAKing */
+			if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
+				if (ep->dev->protocol_stall) {
+					ep->stopped = 1;
+					set_halt (ep);
+				}
+				mode = 2;
+			/* an extra OUT token is an error */
+			} else if (((t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT))
+					&& req
+					&& req->req.actual == req->req.length)
+					|| (ep->responded && !req)) {
+				ep->dev->protocol_stall = 1;
+				set_halt (ep);
+				ep->stopped = 1;
+				if (req)
+					done (ep, req, -EOVERFLOW);
+				req = NULL;
+			}
+		}
+	}
+
+	if (unlikely (!req))
+		return;
+
+	/* manual DMA queue advance after short OUT */
+	if (likely (ep->dma != 0)) {
+		if (t & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
+			u32	count;
+			int	stopped = ep->stopped;
+
+			/* TRANSFERRED works around OUT_DONE erratum 0112.
+			 * we expect (N <= maxpacket) bytes; host wrote M.
+			 * iff (M < N) we won't ever see a DMA interrupt.
+			 */
+			ep->stopped = 1;
+			for (count = 0; ; t = readl (&ep->regs->ep_stat)) {
+
+				/* any preceding dma transfers must finish.
+				 * dma handles (M >= N), may empty the queue
+				 */
+				scan_dma_completions (ep);
+				if (unlikely (list_empty (&ep->queue)
+						|| ep->out_overflow)) {
+					req = NULL;
+					break;
+				}
+				req = list_entry (ep->queue.next,
+					struct net2280_request, queue);
+
+				/* here either (M < N), a "real" short rx;
+				 * or (M == N) and the queue didn't empty
+				 */
+				if (likely (t & (1 << FIFO_EMPTY))) {
+					count = readl (&ep->dma->dmacount);
+					count &= DMA_BYTE_COUNT_MASK;
+					if (readl (&ep->dma->dmadesc)
+							!= req->td_dma)
+						req = NULL;
+					break;
+				}
+				udelay(1);
+			}
+
+			/* stop DMA, leave ep NAKing */
+			writel ((1 << DMA_ABORT), &ep->dma->dmastat);
+			spin_stop_dma (ep->dma);
+
+			if (likely (req)) {
+				req->td->dmacount = 0;
+				t = readl (&ep->regs->ep_avail);
+				dma_done (ep, req, count,
+					(ep->out_overflow || t)
+						? -EOVERFLOW : 0);
+			}
+
+			/* also flush to prevent erratum 0106 trouble */
+			if (unlikely (ep->out_overflow
+					|| (ep->dev->chiprev == 0x0100
+						&& ep->dev->gadget.speed
+							== USB_SPEED_FULL))) {
+				out_flush (ep);
+				ep->out_overflow = 0;
+			}
+
+			/* (re)start dma if needed, stop NAKing */
+			ep->stopped = stopped;
+			if (!list_empty (&ep->queue))
+				restart_dma (ep);
+		} else
+			DEBUG (ep->dev, "%s dma ep_stat %08x ??\n",
+					ep->ep.name, t);
+		return;
+
+	/* data packet(s) received (in the fifo, OUT) */
+	} else if (t & (1 << DATA_PACKET_RECEIVED_INTERRUPT)) {
+		if (read_fifo (ep, req) && ep->num != 0)
+			mode = 2;
+
+	/* data packet(s) transmitted (IN) */
+	} else if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) {
+		unsigned	len;
+
+		len = req->req.length - req->req.actual;
+		if (len > ep->ep.maxpacket)
+			len = ep->ep.maxpacket;
+		req->req.actual += len;
+
+		/* if we wrote it all, we're usually done */
+		if (req->req.actual == req->req.length) {
+			if (ep->num == 0) {
+				/* send zlps until the status stage */
+			} else if (!req->req.zero || len != ep->ep.maxpacket)
+				mode = 2;
+		}
+
+	/* there was nothing to do ...  */
+	} else if (mode == 1)
+		return;
+
+	/* done */
+	if (mode == 2) {
+		/* stream endpoints often resubmit/unlink in completion */
+		done (ep, req, 0);
+
+		/* maybe advance queue to next request */
+		if (ep->num == 0) {
+			/* NOTE:  net2280 could let gadget driver start the
+			 * status stage later. since not all controllers let
+			 * them control that, the api doesn't (yet) allow it.
+			 */
+			if (!ep->stopped)
+				allow_status (ep);
+			req = NULL;
+		} else {
+			if (!list_empty (&ep->queue) && !ep->stopped)
+				req = list_entry (ep->queue.next,
+					struct net2280_request, queue);
+			else
+				req = NULL;
+			if (req && !ep->is_in)
+				stop_out_naking (ep);
+		}
+	}
+
+	/* is there a buffer for the next packet?
+	 * for best streaming performance, make sure there is one.
+	 */
+	if (req && !ep->stopped) {
+
+		/* load IN fifo with next packet (may be zlp) */
+		if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
+			write_fifo (ep, &req->req);
+	}
+}
+
+static struct net2280_ep *
+get_ep_by_addr (struct net2280 *dev, u16 wIndex)
+{
+	struct net2280_ep	*ep;
+
+	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
+		return &dev->ep [0];
+	list_for_each_entry (ep, &dev->gadget.ep_list, ep.ep_list) {
+		u8	bEndpointAddress;
+
+		if (!ep->desc)
+			continue;
+		bEndpointAddress = ep->desc->bEndpointAddress;
+		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
+			continue;
+		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
+			return ep;
+	}
+	return NULL;
+}
+
+static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
+{
+	struct net2280_ep	*ep;
+	u32			num, scratch;
+
+	/* most of these don't need individual acks */
+	stat &= ~(1 << INTA_ASSERTED);
+	if (!stat)
+		return;
+	// DEBUG (dev, "irqstat0 %04x\n", stat);
+
+	/* starting a control request? */
+	if (unlikely (stat & (1 << SETUP_PACKET_INTERRUPT))) {
+		union {
+			u32			raw [2];
+			struct usb_ctrlrequest	r;
+		} u;
+		int				tmp;
+		struct net2280_request		*req;
+
+		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
+			if (readl (&dev->usb->usbstat) & (1 << HIGH_SPEED))
+				dev->gadget.speed = USB_SPEED_HIGH;
+			else
+				dev->gadget.speed = USB_SPEED_FULL;
+			net2280_led_speed (dev, dev->gadget.speed);
+			DEBUG(dev, "%s\n", usb_speed_string(dev->gadget.speed));
+		}
+
+		ep = &dev->ep [0];
+		ep->irqs++;
+
+		/* make sure any leftover request state is cleared */
+		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
+		while (!list_empty (&ep->queue)) {
+			req = list_entry (ep->queue.next,
+					struct net2280_request, queue);
+			done (ep, req, (req->req.actual == req->req.length)
+						? 0 : -EPROTO);
+		}
+		ep->stopped = 0;
+		dev->protocol_stall = 0;
+
+		if (ep->dev->pdev->device == 0x2280)
+			tmp = (1 << FIFO_OVERFLOW)
+				| (1 << FIFO_UNDERFLOW);
+		else
+			tmp = 0;
+
+		writel (tmp | (1 << TIMEOUT)
+			| (1 << USB_STALL_SENT)
+			| (1 << USB_IN_NAK_SENT)
+			| (1 << USB_IN_ACK_RCVD)
+			| (1 << USB_OUT_PING_NAK_SENT)
+			| (1 << USB_OUT_ACK_SENT)
+			| (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
+			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
+			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
+			| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
+			| (1 << DATA_IN_TOKEN_INTERRUPT)
+			, &ep->regs->ep_stat);
+		u.raw [0] = readl (&dev->usb->setup0123);
+		u.raw [1] = readl (&dev->usb->setup4567);
+
+		cpu_to_le32s (&u.raw [0]);
+		cpu_to_le32s (&u.raw [1]);
+
+		tmp = 0;
+
+#define	w_value		le16_to_cpu(u.r.wValue)
+#define	w_index		le16_to_cpu(u.r.wIndex)
+#define	w_length	le16_to_cpu(u.r.wLength)
+
+		/* ack the irq */
+		writel (1 << SETUP_PACKET_INTERRUPT, &dev->regs->irqstat0);
+		stat ^= (1 << SETUP_PACKET_INTERRUPT);
+
+		/* watch control traffic at the token level, and force
+		 * synchronization before letting the status stage happen.
+		 * FIXME ignore tokens we'll NAK, until driver responds.
+		 * that'll mean a lot less irqs for some drivers.
+		 */
+		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
+		if (ep->is_in) {
+			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
+				| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
+				| (1 << DATA_IN_TOKEN_INTERRUPT);
+			stop_out_naking (ep);
+		} else
+			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+				| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
+				| (1 << DATA_IN_TOKEN_INTERRUPT);
+		writel (scratch, &dev->epregs [0].ep_irqenb);
+
+		/* we made the hardware handle most lowlevel requests;
+		 * everything else goes uplevel to the gadget code.
+		 */
+		ep->responded = 1;
+		switch (u.r.bRequest) {
+		case USB_REQ_GET_STATUS: {
+			struct net2280_ep	*e;
+			__le32			status;
+
+			/* hw handles device and interface status */
+			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
+				goto delegate;
+			if ((e = get_ep_by_addr (dev, w_index)) == 0
+					|| w_length > 2)
+				goto do_stall;
+
+			if (readl (&e->regs->ep_rsp)
+					& (1 << SET_ENDPOINT_HALT))
+				status = cpu_to_le32 (1);
+			else
+				status = cpu_to_le32 (0);
+
+			/* don't bother with a request object! */
+			writel (0, &dev->epregs [0].ep_irqenb);
+			set_fifo_bytecount (ep, w_length);
+			writel ((__force u32)status, &dev->epregs [0].ep_data);
+			allow_status (ep);
+			VDEBUG (dev, "%s stat %02x\n", ep->ep.name, status);
+			goto next_endpoints;
+			}
+			break;
+		case USB_REQ_CLEAR_FEATURE: {
+			struct net2280_ep	*e;
+
+			/* hw handles device features */
+			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+				goto delegate;
+			if (w_value != USB_ENDPOINT_HALT
+					|| w_length != 0)
+				goto do_stall;
+			if ((e = get_ep_by_addr (dev, w_index)) == 0)
+				goto do_stall;
+			if (e->wedged) {
+				VDEBUG(dev, "%s wedged, halt not cleared\n",
+						ep->ep.name);
+			} else {
+				VDEBUG(dev, "%s clear halt\n", ep->ep.name);
+				clear_halt(e);
+			}
+			allow_status (ep);
+			goto next_endpoints;
+			}
+			break;
+		case USB_REQ_SET_FEATURE: {
+			struct net2280_ep	*e;
+
+			/* hw handles device features */
+			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+				goto delegate;
+			if (w_value != USB_ENDPOINT_HALT
+					|| w_length != 0)
+				goto do_stall;
+			if ((e = get_ep_by_addr (dev, w_index)) == 0)
+				goto do_stall;
+			if (e->ep.name == ep0name)
+				goto do_stall;
+			set_halt (e);
+			allow_status (ep);
+			VDEBUG (dev, "%s set halt\n", ep->ep.name);
+			goto next_endpoints;
+			}
+			break;
+		default:
+delegate:
+			VDEBUG (dev, "setup %02x.%02x v%04x i%04x l%04x "
+				"ep_cfg %08x\n",
+				u.r.bRequestType, u.r.bRequest,
+				w_value, w_index, w_length,
+				readl (&ep->regs->ep_cfg));
+			ep->responded = 0;
+			spin_unlock (&dev->lock);
+			tmp = dev->driver->setup (&dev->gadget, &u.r);
+			spin_lock (&dev->lock);
+		}
+
+		/* stall ep0 on error */
+		if (tmp < 0) {
+do_stall:
+			VDEBUG (dev, "req %02x.%02x protocol STALL; stat %d\n",
+					u.r.bRequestType, u.r.bRequest, tmp);
+			dev->protocol_stall = 1;
+		}
+
+		/* some in/out token irq should follow; maybe stall then.
+		 * driver must queue a request (even zlp) or halt ep0
+		 * before the host times out.
+		 */
+	}
+
+#undef	w_value
+#undef	w_index
+#undef	w_length
+
+next_endpoints:
+	/* endpoint data irq ? */
+	scratch = stat & 0x7f;
+	stat &= ~0x7f;
+	for (num = 0; scratch; num++) {
+		u32		t;
+
+		/* do this endpoint's FIFO and queue need tending? */
+		t = 1 << num;
+		if ((scratch & t) == 0)
+			continue;
+		scratch ^= t;
+
+		ep = &dev->ep [num];
+		handle_ep_small (ep);
+	}
+
+	if (stat)
+		DEBUG (dev, "unhandled irqstat0 %08x\n", stat);
+}
+
+#define DMA_INTERRUPTS ( \
+		  (1 << DMA_D_INTERRUPT) \
+		| (1 << DMA_C_INTERRUPT) \
+		| (1 << DMA_B_INTERRUPT) \
+		| (1 << DMA_A_INTERRUPT))
+#define	PCI_ERROR_INTERRUPTS ( \
+		  (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT) \
+		| (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT) \
+		| (1 << PCI_RETRY_ABORT_INTERRUPT))
+
+static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
+{
+	struct net2280_ep	*ep;
+	u32			tmp, num, mask, scratch;
+
+	/* after disconnect there's nothing else to do! */
+	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
+	mask = (1 << HIGH_SPEED) | (1 << FULL_SPEED);
+
+	/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
+	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
+	 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
+	 * only indicates a change in the reset state).
+	 */
+	if (stat & tmp) {
+		writel (tmp, &dev->regs->irqstat1);
+		if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT))
+					&& ((readl (&dev->usb->usbstat) & mask)
+							== 0))
+				|| ((readl (&dev->usb->usbctl)
+					& (1 << VBUS_PIN)) == 0)
+			    ) && ( dev->gadget.speed != USB_SPEED_UNKNOWN)) {
+			DEBUG (dev, "disconnect %s\n",
+					dev->driver->driver.name);
+			stop_activity (dev, dev->driver);
+			ep0_start (dev);
+			return;
+		}
+		stat &= ~tmp;
+
+		/* VBUS can bounce ... one of many reasons to ignore the
+		 * notion of hotplug events on bus connect/disconnect!
+		 */
+		if (!stat)
+			return;
+	}
+
+	/* NOTE: chip stays in PCI D0 state for now, but it could
+	 * enter D1 to save more power
+	 */
+	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
+	if (stat & tmp) {
+		writel (tmp, &dev->regs->irqstat1);
+		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
+			if (dev->driver->suspend)
+				dev->driver->suspend (&dev->gadget);
+			if (!enable_suspend)
+				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
+		} else {
+			if (dev->driver->resume)
+				dev->driver->resume (&dev->gadget);
+			/* at high speed, note erratum 0133 */
+		}
+		stat &= ~tmp;
+	}
+		s = "not attached";
+	/* clear any other status/irqs */
+	if (stat)
+		writel (stat, &dev->regs->irqstat1);
+
+	/* some status we can just ignore */
+	if (dev->pdev->device == 0x2280)
+		stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
+			  | (1 << SUSPEND_REQUEST_INTERRUPT)
+			  | (1 << RESUME_INTERRUPT)
+			  | (1 << SOF_INTERRUPT));
+	else
+		stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
+			  | (1 << RESUME_INTERRUPT)
+			  | (1 << SOF_DOWN_INTERRUPT)
+			  | (1 << SOF_INTERRUPT));
+
+	if (!stat)
+		return;
+	// DEBUG (dev, "irqstat1 %08x\n", stat);
+
+	/* DMA status, for ep-{a,b,c,d} */
+	scratch = stat & DMA_INTERRUPTS;
+	stat &= ~DMA_INTERRUPTS;
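+	/* shift so bit 0 lines up with the ep-a dma interrupt bit */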
+	scratch >>= 9;
+	for (num = 0; scratch; num++) {
+		struct net2280_dma_regs	__iomem *dma;
+
+		tmp = 1 << num;
+		if ((tmp & scratch) == 0)
+			continue;
+		scratch ^= tmp;
+
+		ep = &dev->ep [num + 1];
+		dma = ep->dma;
+
+		if (!dma)
+			continue;
+
+		/* clear ep's dma status */
+		tmp = readl (&dma->dmastat);
+		writel (tmp, &dma->dmastat);
+
+		/* chaining should stop on abort, short OUT from fifo,
+		 * or (stat0 codepath) short OUT transfer.
+		 */
+		if (!use_dma_chaining) {
+			if ((tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT))
+					== 0) {
+				DEBUG (ep->dev, "%s no xact done? %08x\n",
+					ep->ep.name, tmp);
+				continue;
+			}
+			stop_dma (ep->dma);
+		}
+
+		/* OUT transfers terminate when the data from the
+		 * host is in our memory.  Process whatever's done.
+		 * On this path, we know the transfer's last packet wasn't
+		 * less than req->length. NAK_OUT_PACKETS may be set,
+		 * or the FIFO may already be holding new packets.
+		 *
+		 * IN transfers can linger in the FIFO for a very
+		 * long time ... we ignore that for now; accounting
+		 * precisely (like PIO does) needs per-packet irqs
+		 */
+		scan_dma_completions (ep);
+
+		/* disable dma on inactive queues; else maybe restart */
+		if (list_empty (&ep->queue)) {
+			if (use_dma_chaining)
+				stop_dma (ep->dma);
+		} else {
+			tmp = readl (&dma->dmactl);
+			if (!use_dma_chaining
+					|| (tmp & (1 << DMA_ENABLE)) == 0)
+				restart_dma (ep);
+			else if (ep->is_in && use_dma_chaining) {
+				struct net2280_request	*req;
+				__le32			dmacount;
+
+				/* the descriptor at the head of the chain
+				 * may still have VALID_BIT clear; that's
+				 * used to trigger changing DMA_FIFO_VALIDATE
+				 * (affects automagic zlp writes).
+				 */
+				req = list_entry (ep->queue.next,
+						struct net2280_request, queue);
+				dmacount = req->td->dmacount;
+				dmacount &= cpu_to_le32 (
+						(1 << VALID_BIT)
+						| DMA_BYTE_COUNT_MASK);
+				if (dmacount && (dmacount & valid_bit) == 0)
+					restart_dma (ep);
+			}
+		}
+		ep->irqs++;
+	}
+
+	/* NOTE:  there are other PCI errors we might usefully notice.
+	 * if they appear very often, here's where to try recovering.
+	 */
+	if (stat & PCI_ERROR_INTERRUPTS) {
+		ERROR (dev, "pci dma error; stat %08x\n", stat);
+		stat &= ~PCI_ERROR_INTERRUPTS;
+		/* these are fatal errors, but "maybe" they won't
+		 * happen again ...
+		 */
+		stop_activity (dev, dev->driver);
+		ep0_start (dev);
+		stat = 0;
+	}
+
+	if (stat)
+		DEBUG (dev, "unhandled irqstat1 %08x\n", stat);
+}
+
+static irqreturn_t net2280_irq (int irq, void *_dev)
+{
+	struct net2280		*dev = _dev;
+
+	/* shared interrupt, not ours */
+	if (!(readl(&dev->regs->irqstat0) & (1 << INTA_ASSERTED)))
+		return IRQ_NONE;
+
+	spin_lock (&dev->lock);
+
+	/* handle disconnect, dma, and more */
+	handle_stat1_irqs (dev, readl (&dev->regs->irqstat1));
+
+	/* control requests and PIO */
+	handle_stat0_irqs (dev, readl (&dev->regs->irqstat0));
+
+	spin_unlock (&dev->lock);
+
+	return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void gadget_release (struct device *_dev)
+{
+	struct net2280	*dev = dev_get_drvdata (_dev);
+
+	kfree (dev);
+}
+
+/* tear down the binding between this driver and the pci device */
+
+static void net2280_remove (struct pci_dev *pdev)
+{
+	struct net2280		*dev = pci_get_drvdata (pdev);
+
+	usb_del_gadget_udc(&dev->gadget);
+
+	BUG_ON(dev->driver);
+
+	/* then clean up the resources we allocated during probe() */
+	net2280_led_shutdown (dev);
+	if (dev->requests) {
+		int		i;
+		for (i = 1; i < 5; i++) {
+			if (!dev->ep [i].dummy)
+				continue;
+			pci_pool_free (dev->requests, dev->ep [i].dummy,
+					dev->ep [i].td_dma);
+		}
+		pci_pool_destroy (dev->requests);
+	}
+	if (dev->got_irq)
+		free_irq (pdev->irq, dev);
+	if (dev->regs)
+		iounmap (dev->regs);
+	if (dev->region)
+		release_mem_region (pci_resource_start (pdev, 0),
+				pci_resource_len (pdev, 0));
+	if (dev->enabled)
+		pci_disable_device (pdev);
+	device_unregister (&dev->gadget.dev);
+	device_remove_file (&pdev->dev, &dev_attr_registers);
+	pci_set_drvdata (pdev, NULL);
+
+	INFO (dev, "unbind\n");
+}
+
+/* wrap this driver around the specified device, but
+ * don't respond over USB until a gadget driver binds to us.
+ */
+
+static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct net2280		*dev;
+	unsigned long		resource, len;
+	void			__iomem *base = NULL;
+	int			retval, i;
+
+	/* alloc, and start init */
+	dev = kzalloc (sizeof *dev, GFP_KERNEL);
+	if (dev == NULL) {
+		retval = -ENOMEM;
+		goto done;
+	}
+
+	pci_set_drvdata (pdev, dev);
+	spin_lock_init (&dev->lock);
+	dev->pdev = pdev;
+	dev->gadget.ops = &net2280_ops;
+	dev->gadget.max_speed = USB_SPEED_HIGH;
+
+	/* the "gadget" abstracts/virtualizes the controller */
+	dev_set_name(&dev->gadget.dev, "gadget");
+	dev->gadget.dev.parent = &pdev->dev;
+	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+	dev->gadget.dev.release = gadget_release;
+	dev->gadget.name = driver_name;
+
+	/* now all the pci goodies ... */
+	if (pci_enable_device (pdev) < 0) {
+		retval = -ENODEV;
+		goto done;
+	}
+	dev->enabled = 1;
+
+	/* BAR 0 holds all the registers
+	 * BAR 1 is 8051 memory; unused here (note erratum 0103)
+	 * BAR 2 is fifo memory; unused here
+	 */
+	resource = pci_resource_start (pdev, 0);
+	len = pci_resource_len (pdev, 0);
+	if (!request_mem_region (resource, len, driver_name)) {
+		DEBUG (dev, "controller already in use\n");
+		retval = -EBUSY;
+		goto done;
+	}
+	dev->region = 1;
+
+	/* FIXME provide firmware download interface to put
+	 * 8051 code into the chip, e.g. to turn on PCI PM.
+	 */
+
+	base = ioremap_nocache (resource, len);
+	if (base == NULL) {
+		DEBUG (dev, "can't map memory\n");
+		retval = -EFAULT;
+		goto done;
+	}
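+	/* all of the register blocks below live at fixed offsets inside
+	 * the single BAR 0 window mapped above.
+	 */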
+	dev->regs = (struct net2280_regs __iomem *) base;
+	dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
+	dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
+	dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
+	dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
+	dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
+
+	/* put into initial config, link up all endpoints */
+	writel (0, &dev->usb->usbctl);
+	usb_reset (dev);
+	usb_reinit (dev);
+
+	/* irq setup after old hardware is cleaned up */
+	if (!pdev->irq) {
+		ERROR (dev, "No IRQ.  Check PCI setup!\n");
+		retval = -ENODEV;
+		goto done;
+	}
+
+	if (request_irq (pdev->irq, net2280_irq, IRQF_SHARED, driver_name, dev)
+			!= 0) {
+		ERROR (dev, "request interrupt %d failed\n", pdev->irq);
+		retval = -EBUSY;
+		goto done;
+	}
+	dev->got_irq = 1;
+
+	/* DMA setup */
+	/* NOTE:  we know only the 32 LSBs of dma addresses may be nonzero */
+	dev->requests = pci_pool_create ("requests", pdev,
+		sizeof (struct net2280_dma),
+		0 /* no alignment requirements */,
+		0 /* or page-crossing issues */);
+	if (!dev->requests) {
+		DEBUG (dev, "can't get request pool\n");
+		retval = -ENOMEM;
+		goto done;
+	}
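+	/* the DMA-capable endpoints (indices 1..4, ep-a..ep-d) each keep
+	 * one descriptor from the pool as their persistent "dummy".
+	 */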
+	for (i = 1; i < 5; i++) {
+		struct net2280_dma	*td;
+
+		td = pci_pool_alloc (dev->requests, GFP_KERNEL,
+				&dev->ep [i].td_dma);
+		if (!td) {
+			DEBUG (dev, "can't get dummy %d\n", i);
+			retval = -ENOMEM;
+			goto done;
+		}
+		td->dmacount = 0;	/* not VALID */
+		td->dmaaddr = cpu_to_le32 (DMA_ADDR_INVALID);
+		td->dmadesc = td->dmaaddr;
+		dev->ep [i].dummy = td;
+	}
+
+	/* enable lower-overhead pci memory bursts during DMA */
+	writel ( (1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE)
+			// 256 write retries may not be enough...
+			// | (1 << PCI_RETRY_ABORT_ENABLE)
+			| (1 << DMA_READ_MULTIPLE_ENABLE)
+			| (1 << DMA_READ_LINE_ENABLE)
+			, &dev->pci->pcimstctl);
+	/* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
+	pci_set_master (pdev);
+	pci_try_set_mwi (pdev);
+
+	/* ... also flushes any posted pci writes */
+	dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;
+
+	/* done */
+	INFO (dev, "%s\n", driver_desc);
+	INFO (dev, "irq %d, pci mem %p, chip rev %04x\n",
+			pdev->irq, base, dev->chiprev);
+	INFO (dev, "version: " DRIVER_VERSION "; dma %s\n",
+			use_dma
+				? (use_dma_chaining ? "chaining" : "enabled")
+				: "disabled");
+	retval = device_register (&dev->gadget.dev);
+	if (retval)
+		goto done;
+	retval = device_create_file (&pdev->dev, &dev_attr_registers);
+	if (retval)
+		goto done;
+
+	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
+	if (retval)
+		goto done;
+	return 0;
+
+done:
+	if (dev)
+		net2280_remove (pdev);
+	return retval;
+}
+
+/* make sure the board is quiescent; otherwise it will continue
+ * generating IRQs across the upcoming reboot.
+ */
+
+static void net2280_shutdown (struct pci_dev *pdev)
+{
+	struct net2280		*dev = pci_get_drvdata (pdev);
+
+	/* disable IRQs */
+	writel (0, &dev->regs->pciirqenb0);
+	writel (0, &dev->regs->pciirqenb1);
+
+	/* disable the pullup so the host will think we're gone */
+	writel (0, &dev->usb->usbctl);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
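+/* match the "USB device" programming interface (class 0c03fe) for
+ * NetChip's vendor ID; both the 2280 and the 2282 use this driver.
+ */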
+static const struct pci_device_id pci_ids [] = { {
+	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+	.class_mask =	~0,
+	.vendor =	0x17cc,
+	.device =	0x2280,
+	.subvendor =	PCI_ANY_ID,
+	.subdevice =	PCI_ANY_ID,
+}, {
+	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+	.class_mask =	~0,
+	.vendor =	0x17cc,
+	.device =	0x2282,
+	.subvendor =	PCI_ANY_ID,
+	.subdevice =	PCI_ANY_ID,
+
+}, { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE (pci, pci_ids);
+
+/* pci driver glue; this is a "new style" PCI driver module */
+static struct pci_driver net2280_pci_driver = {
+	.name =		(char *) driver_name,
+	.id_table =	pci_ids,
+
+	.probe =	net2280_probe,
+	.remove =	net2280_remove,
+	.shutdown =	net2280_shutdown,
+
+	/* FIXME add power management support */
+};
+
+MODULE_DESCRIPTION (DRIVER_DESC);
+MODULE_AUTHOR ("David Brownell");
+MODULE_LICENSE ("GPL");
+
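+/* DMA chaining is meaningless without DMA, so force it off in that case */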
+static int __init init (void)
+{
+	if (!use_dma)
+		use_dma_chaining = 0;
+	return pci_register_driver (&net2280_pci_driver);
+}
+module_init (init);
+
+static void __exit cleanup (void)
+{
+	pci_unregister_driver (&net2280_pci_driver);
+}
+module_exit (cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/net2280.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/net2280.h
new file mode 100644
index 0000000..a844be0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/net2280.h
@@ -0,0 +1,308 @@
+/*
+ * NetChip 2280 high/full speed USB device controller.
+ * Unlike many such controllers, this one talks PCI.
+ */
+
+/*
+ * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
+ * Copyright (C) 2003 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/usb/net2280.h>
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef	__KERNEL__
+
+/* indexed registers [11.10] are accessed indirectly
+ * caller must own the device lock.
+ */
+
+static inline u32
+get_idx_reg (struct net2280_regs __iomem *regs, u32 index)
+{
+	writel (index, &regs->idxaddr);
+	/* NOTE:  synchs device/cpu memory views */
+	return readl (&regs->idxdata);
+}
+
+static inline void
+set_idx_reg (struct net2280_regs __iomem *regs, u32 index, u32 value)
+{
+	writel (index, &regs->idxaddr);
+	writel (value, &regs->idxdata);
+	/* posted, may not be visible yet */
+}
+
+#endif	/* __KERNEL__ */
+
+
+#define REG_DIAG		0x0
+#define     RETRY_COUNTER                                       16
+#define     FORCE_PCI_SERR                                      11
+#define     FORCE_PCI_INTERRUPT                                 10
+#define     FORCE_USB_INTERRUPT                                 9
+#define     FORCE_CPU_INTERRUPT                                 8
+#define     ILLEGAL_BYTE_ENABLES                                5
+#define     FAST_TIMES                                          4
+#define     FORCE_RECEIVE_ERROR                                 2
+#define     FORCE_TRANSMIT_CRC_ERROR                            0
+#define REG_FRAME		0x02	/* from last sof */
+#define REG_CHIPREV		0x03	/* in bcd */
+#define	REG_HS_NAK_RATE		0x0a	/* NAK per N uframes */
+
+#define	CHIPREV_1	0x0100
+#define	CHIPREV_1A	0x0110
+
+#ifdef	__KERNEL__
+
+/* ep a-f highspeed and fullspeed maxpacket, addresses
+ * computed from ep->num
+ */
+#define REG_EP_MAXPKT(dev,num) (((num) + 1) * 0x10 + \
+		(((dev)->gadget.speed == USB_SPEED_HIGH) ? 0 : 1))
+
+/*-------------------------------------------------------------------------*/
+
+/* [8.3] for scatter/gather i/o
+ * use struct net2280_dma_regs bitfields
+ */
+struct net2280_dma {
+	__le32		dmacount;
+	__le32		dmaaddr;		/* the buffer */
+	__le32		dmadesc;		/* next dma descriptor */
+	__le32		_reserved;
+} __attribute__ ((aligned (16)));
+
+/*-------------------------------------------------------------------------*/
+
+/* DRIVER DATA STRUCTURES and UTILITIES */
+
+struct net2280_ep {
+	struct usb_ep				ep;
+	struct net2280_ep_regs			__iomem *regs;
+	struct net2280_dma_regs			__iomem *dma;
+	struct net2280_dma			*dummy;
+	dma_addr_t				td_dma;	/* of dummy */
+	struct net2280				*dev;
+	unsigned long				irqs;
+
+	/* analogous to a host-side qh */
+	struct list_head			queue;
+	const struct usb_endpoint_descriptor	*desc;
+	unsigned				num : 8,
+						fifo_size : 12,
+						in_fifo_validate : 1,
+						out_overflow : 1,
+						stopped : 1,
+						wedged : 1,
+						is_in : 1,
+						is_iso : 1,
+						responded : 1;
+};
+
+static inline void allow_status (struct net2280_ep *ep)
+{
+	/* ep0 only */
+	writel (  (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
+		| (1 << CLEAR_NAK_OUT_PACKETS)
+		| (1 << CLEAR_NAK_OUT_PACKETS_MODE)
+		, &ep->regs->ep_rsp);
+	ep->stopped = 1;
+}
+
+/* count (<= 4) bytes in the next fifo write will be valid */
+static inline void set_fifo_bytecount (struct net2280_ep *ep, unsigned count)
+{
+	writeb (count, 2 + (u8 __iomem *) &ep->regs->ep_cfg);
+}
+
+struct net2280_request {
+	struct usb_request		req;
+	struct net2280_dma		*td;
+	dma_addr_t			td_dma;
+	struct list_head		queue;
+	unsigned			mapped : 1,
+					valid : 1;
+};
+
+struct net2280 {
+	/* each pci device provides one gadget, several endpoints */
+	struct usb_gadget		gadget;
+	spinlock_t			lock;
+	struct net2280_ep		ep [7];
+	struct usb_gadget_driver 	*driver;
+	unsigned			enabled : 1,
+					protocol_stall : 1,
+					softconnect : 1,
+					got_irq : 1,
+					region : 1;
+	u16				chiprev;
+
+	/* pci state used to access those endpoints */
+	struct pci_dev			*pdev;
+	struct net2280_regs		__iomem *regs;
+	struct net2280_usb_regs		__iomem *usb;
+	struct net2280_pci_regs		__iomem *pci;
+	struct net2280_dma_regs		__iomem *dma;
+	struct net2280_dep_regs		__iomem *dep;
+	struct net2280_ep_regs		__iomem *epregs;
+
+	struct pci_pool			*requests;
+	// statistics...
+};
+
+static inline void set_halt (struct net2280_ep *ep)
+{
+	/* ep0 and bulk/intr endpoints */
+	writel (  (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
+		    /* set NAK_OUT for erratum 0114 */
+		| ((ep->dev->chiprev == CHIPREV_1) << SET_NAK_OUT_PACKETS)
+		| (1 << SET_ENDPOINT_HALT)
+		, &ep->regs->ep_rsp);
+}
+
+static inline void clear_halt (struct net2280_ep *ep)
+{
+	/* ep0 and bulk/intr endpoints */
+	writel (  (1 << CLEAR_ENDPOINT_HALT)
+		| (1 << CLEAR_ENDPOINT_TOGGLE)
+		    /* unless the gadget driver left a short packet in the
+		     * fifo, this reverses the erratum 0114 workaround.
+		     */
+		| ((ep->dev->chiprev == CHIPREV_1) << CLEAR_NAK_OUT_PACKETS)
+		, &ep->regs->ep_rsp);
+}
+
+#ifdef USE_RDK_LEDS
+
+static inline void net2280_led_init (struct net2280 *dev)
+{
+	/* LED3 (green) is on during USB activity. note erratum 0113. */
+	writel ((1 << GPIO3_LED_SELECT)
+		| (1 << GPIO3_OUTPUT_ENABLE)
+		| (1 << GPIO2_OUTPUT_ENABLE)
+		| (1 << GPIO1_OUTPUT_ENABLE)
+		| (1 << GPIO0_OUTPUT_ENABLE)
+		, &dev->regs->gpioctl);
+}
+
+/* indicate speed with bi-color LED 0/1 */
+static inline
+void net2280_led_speed (struct net2280 *dev, enum usb_device_speed speed)
+{
+	u32	val = readl (&dev->regs->gpioctl);
+	switch (speed) {
+	case USB_SPEED_HIGH:		/* green */
+		val &= ~(1 << GPIO0_DATA);
+		val |= (1 << GPIO1_DATA);
+		break;
+	case USB_SPEED_FULL:		/* red */
+		val &= ~(1 << GPIO1_DATA);
+		val |= (1 << GPIO0_DATA);
+		break;
+	default:			/* (off/black) */
+		val &= ~((1 << GPIO1_DATA) | (1 << GPIO0_DATA));
+		break;
+	}
+	writel (val, &dev->regs->gpioctl);
+}
+
+/* indicate power with LED 2 */
+static inline void net2280_led_active (struct net2280 *dev, int is_active)
+{
+	u32	val = readl (&dev->regs->gpioctl);
+
+	// FIXME this LED never seems to turn on.
+	if (is_active)
+		val |= GPIO2_DATA;
+	else
+		val &= ~GPIO2_DATA;
+	writel (val, &dev->regs->gpioctl);
+}
+static inline void net2280_led_shutdown (struct net2280 *dev)
+{
+	/* turn off all four GPIO*_DATA bits */
+	writel (readl (&dev->regs->gpioctl) & ~0x0f,
+			&dev->regs->gpioctl);
+}
+
+#else
+
+#define net2280_led_init(dev)		do { } while (0)
+#define net2280_led_speed(dev, speed)	do { } while (0)
+#define net2280_led_active(dev, is_active)	do { } while (0)
+#define net2280_led_shutdown(dev)	do { } while (0)
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#define xprintk(dev,level,fmt,args...) \
+	printk(level "%s %s: " fmt , driver_name , \
+			pci_name(dev->pdev) , ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DEBUG(dev,fmt,args...) \
+	xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DEBUG(dev,fmt,args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE
+#define VDEBUG DEBUG
+#else
+#define VDEBUG(dev,fmt,args...) \
+	do { } while (0)
+#endif	/* VERBOSE */
+
+#define ERROR(dev,fmt,args...) \
+	xprintk(dev , KERN_ERR , fmt , ## args)
+#define WARNING(dev,fmt,args...) \
+	xprintk(dev , KERN_WARNING , fmt , ## args)
+#define INFO(dev,fmt,args...) \
+	xprintk(dev , KERN_INFO , fmt , ## args)
+
+/*-------------------------------------------------------------------------*/
+
+static inline void start_out_naking (struct net2280_ep *ep)
+{
+	/* NOTE:  hardware races lurk here, and PING protocol issues */
+	writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+	/* synch with device */
+	readl (&ep->regs->ep_rsp);
+}
+
+#ifdef DEBUG
+static inline void assert_out_naking (struct net2280_ep *ep, const char *where)
+{
+	u32	tmp = readl (&ep->regs->ep_stat);
+
+	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
+		DEBUG (ep->dev, "%s %s %08x !NAK\n",
+				ep->ep.name, where, tmp);
+		writel ((1 << SET_NAK_OUT_PACKETS),
+			&ep->regs->ep_rsp);
+	}
+}
+#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep,__func__)
+#else
+#define ASSERT_OUT_NAKING(ep) do {} while (0)
+#endif
+
+static inline void stop_out_naking (struct net2280_ep *ep)
+{
+	u32	tmp;
+
+	tmp = readl (&ep->regs->ep_stat);
+	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
+		writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+}
+
+#endif	/* __KERNEL__ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/nokia.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/nokia.c
new file mode 100644
index 0000000..c7fb772
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/nokia.c
@@ -0,0 +1,259 @@
+/*
+ * nokia.c -- Nokia Composite Gadget Driver
+ *
+ * Copyright (C) 2008-2010 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * This gadget driver borrows from serial.c which is:
+ *
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 by David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * version 2 of that License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+
+#include "u_serial.h"
+#include "u_ether.h"
+#include "u_phonet.h"
+#include "gadget_chips.h"
+
+/* Defines */
+
+#define NOKIA_VERSION_NUM		0x0211
+#define NOKIA_LONG_NAME			"N900 (PC-Suite Mode)"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "u_serial.c"
+#include "f_acm.c"
+#include "f_ecm.c"
+#include "f_obex.c"
+#include "f_serial.c"
+#include "f_phonet.c"
+#include "u_ether.c"
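+/* the function objects above (Phonet, OBEX, ACM, ECM, serial) are combined
+ * by nokia_bind_config() below into a single configuration.
+ */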
+
+/*-------------------------------------------------------------------------*/
+
+#define NOKIA_VENDOR_ID			0x0421	/* Nokia */
+#define NOKIA_PRODUCT_ID		0x01c8	/* Nokia Gadget */
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+#define STRING_DESCRIPTION_IDX		2
+
+static char manufacturer_nokia[] = "Nokia";
+static const char product_nokia[] = NOKIA_LONG_NAME;
+static const char description_nokia[] = "PC-Suite Configuration";
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer_nokia,
+	[STRING_PRODUCT_IDX].s = NOKIA_LONG_NAME,
+	[STRING_DESCRIPTION_IDX].s = description_nokia,
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+	.bLength		= USB_DT_DEVICE_SIZE,
+	.bDescriptorType	= USB_DT_DEVICE,
+	.bcdUSB			= __constant_cpu_to_le16(0x0200),
+	.bDeviceClass		= USB_CLASS_COMM,
+	.idVendor		= __constant_cpu_to_le16(NOKIA_VENDOR_ID),
+	.idProduct		= __constant_cpu_to_le16(NOKIA_PRODUCT_ID),
+	/* .iManufacturer = DYNAMIC */
+	/* .iProduct = DYNAMIC */
+	.bNumConfigurations =	1,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* Module */
+MODULE_DESCRIPTION("Nokia composite gadget driver for N900");
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_LICENSE("GPL");
+
+/*-------------------------------------------------------------------------*/
+
+static u8 hostaddr[ETH_ALEN];
+
+static int __init nokia_bind_config(struct usb_configuration *c)
+{
+	int status = 0;
+
+	status = phonet_bind_config(c);
+	if (status)
+		printk(KERN_DEBUG "could not bind phonet config\n");
+
+	status = obex_bind_config(c, 0);
+	if (status)
+		printk(KERN_DEBUG "could not bind obex config %d\n", 0);
+
+	status = obex_bind_config(c, 1);
+	if (status)
+		printk(KERN_DEBUG "could not bind obex config %d\n", 1);
+
+	status = acm_bind_config(c, 2);
+	if (status)
+		printk(KERN_DEBUG "could not bind acm config\n");
+
+	status = ecm_bind_config(c, hostaddr);
+	if (status)
+		printk(KERN_DEBUG "could not bind ecm config\n");
+
+	return status;
+}
+
+static struct usb_configuration nokia_config_500ma_driver = {
+	.label		= "Bus Powered",
+	.bConfigurationValue = 1,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes	= USB_CONFIG_ATT_ONE,
+	.bMaxPower	= 250, /* 500mA */
+};
+
+static struct usb_configuration nokia_config_100ma_driver = {
+	.label		= "Self Powered",
+	.bConfigurationValue = 2,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes	= USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.bMaxPower	= 50, /* 100 mA */
+};
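+
+/* both configurations offer the same set of functions: one may draw up to
+ * 500 mA from the bus, the self-powered one is limited to 100 mA
+ * (bMaxPower is in units of 2 mA).
+ */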
+
+static int __init nokia_bind(struct usb_composite_dev *cdev)
+{
+	int			gcnum;
+	struct usb_gadget	*gadget = cdev->gadget;
+	int			status;
+
+	status = gphonet_setup(cdev->gadget);
+	if (status < 0)
+		goto err_phonet;
+
+	status = gserial_setup(cdev->gadget, 3);
+	if (status < 0)
+		goto err_serial;
+
+	status = gether_setup(cdev->gadget, hostaddr);
+	if (status < 0)
+		goto err_ether;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto err_usb;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto err_usb;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+
+	device_desc.iProduct = status;
+
+	/* config description */
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto err_usb;
+	strings_dev[STRING_DESCRIPTION_IDX].id = status;
+
+	nokia_config_500ma_driver.iConfiguration = status;
+	nokia_config_100ma_driver.iConfiguration = status;
+
+	/* set up other descriptors */
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM);
+	else {
+		/* this composite needs hardware that supports altsettings
+		 * and several endpoints; refuse to bind anything else.
+		 */
+		pr_err("nokia_bind: controller '%s' not recognized\n",
+			gadget->name);
+		status = -ENODEV;
+		goto err_usb;
+	}
+
+	/* finally register the configuration */
+	status = usb_add_config(cdev, &nokia_config_500ma_driver,
+			nokia_bind_config);
+	if (status < 0)
+		goto err_usb;
+
+	status = usb_add_config(cdev, &nokia_config_100ma_driver,
+			nokia_bind_config);
+	if (status < 0)
+		goto err_usb;
+
+	dev_info(&gadget->dev, "%s\n", NOKIA_LONG_NAME);
+
+	return 0;
+
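+/* error labels unwind in the reverse order of the setup calls above */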
+err_usb:
+	gether_cleanup();
+err_ether:
+	gserial_cleanup();
+err_serial:
+	gphonet_cleanup();
+err_phonet:
+	return status;
+}
+
+static int __exit nokia_unbind(struct usb_composite_dev *cdev)
+{
+	gphonet_cleanup();
+	gserial_cleanup();
+	gether_cleanup();
+
+	return 0;
+}
+
+static struct usb_composite_driver nokia_driver = {
+	.name		= "g_nokia",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.max_speed	= USB_SPEED_HIGH,
+	.unbind		= __exit_p(nokia_unbind),
+};
+
+static int __init nokia_init(void)
+{
+	return usb_composite_probe(&nokia_driver, nokia_bind);
+}
+module_init(nokia_init);
+
+static void __exit nokia_cleanup(void)
+{
+	usb_composite_unregister(&nokia_driver);
+}
+module_exit(nokia_cleanup);
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/omap_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/omap_udc.c
new file mode 100644
index 0000000..3b4b6dd
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/omap_udc.c
@@ -0,0 +1,3148 @@
+/*
+ * omap_udc.c -- for OMAP full speed udc; most chips support OTG.
+ *
+ * Copyright (C) 2004 Texas Instruments, Inc.
+ * Copyright (C) 2004-2005 David Brownell
+ *
+ * OMAP2 & DMA support by Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#undef	DEBUG
+#undef	VERBOSE
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/prefetch.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+#include <asm/mach-types.h>
+
+#include <plat/dma.h>
+#include <plat/usb.h>
+
+#include "omap_udc.h"
+
+#undef	USB_TRACE
+
+/* bulk DMA seems to be behaving for both IN and OUT */
+#define	USE_DMA
+
+/* ISO too */
+#define	USE_ISO
+
+#define	DRIVER_DESC	"OMAP UDC driver"
+#define	DRIVER_VERSION	"4 October 2004"
+
+#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+#define OMAP2_DMA_CH(ch)	(((ch) - 1) << 1)
+#define OMAP24XX_DMA(name, ch)	(OMAP24XX_DMA_##name + OMAP2_DMA_CH(ch))
+
+/*
+ * The OMAP UDC needs _very_ early endpoint setup:  before enabling the
+ * D+ pullup to allow enumeration.  That's too early for the gadget
+ * framework to use from usb_endpoint_enable(), which happens after
+ * enumeration as part of activating an interface.  (But if we add an
+ * optional new "UDC not yet running" state to the gadget driver model,
+ * even just during driver binding, the endpoint autoconfig logic is the
+ * natural spot to manufacture new endpoints.)
+ *
+ * So instead of using endpoint enable calls to control the hardware setup,
+ * this driver defines a "fifo mode" parameter.  It's used during driver
+ * initialization to choose among a set of pre-defined endpoint configs.
+ * See omap_udc_setup() for available modes, or to add others.  That code
+ * lives in an init section, so use this driver as a module if you need
+ * to change the fifo mode after the kernel boots.
+ *
+ * Gadget drivers normally ignore endpoints they don't care about, and
+ * won't include them in configuration descriptors.  That means only
+ * misbehaving hosts would even notice they exist.
+ */
+#ifdef	USE_ISO
+static unsigned fifo_mode = 3;
+#else
+static unsigned fifo_mode = 0;
+#endif
+
+/* "modprobe omap_udc fifo_mode=42", or else as a kernel
+ * boot parameter "omap_udc:fifo_mode=42"
+ */
+module_param (fifo_mode, uint, 0);
+MODULE_PARM_DESC (fifo_mode, "endpoint configuration");
+
+#ifdef	USE_DMA
+static bool use_dma = 1;
+
+/* "modprobe omap_udc use_dma=y", or else as a kernel
+ * boot parameter "omap_udc:use_dma=y"
+ */
+module_param (use_dma, bool, 0);
+MODULE_PARM_DESC (use_dma, "enable/disable DMA");
+#else	/* !USE_DMA */
+
+/* save a bit of code */
+#define	use_dma		0
+#endif	/* !USE_DMA */
+
+
+static const char driver_name [] = "omap_udc";
+static const char driver_desc [] = DRIVER_DESC;
+
+/*-------------------------------------------------------------------------*/
+
+/* there's a notion of "current endpoint" for modifying endpoint
+ * state, and PIO access to its FIFO.
+ */
+
+static void use_ep(struct omap_ep *ep, u16 select)
+{
+	u16	num = ep->bEndpointAddress & 0x0f;
+
+	if (ep->bEndpointAddress & USB_DIR_IN)
+		num |= UDC_EP_DIR;
+	omap_writew(num | select, UDC_EP_NUM);
+	/* when select, MUST deselect later !! */
+}
+
+static inline void deselect_ep(void)
+{
+	u16 w;
+
+	w = omap_readw(UDC_EP_NUM);
+	w &= ~UDC_EP_SEL;
+	omap_writew(w, UDC_EP_NUM);
+	/* 6 wait states before TX will happen */
+}
+
+static void dma_channel_claim(struct omap_ep *ep, unsigned preferred);
+
+/*-------------------------------------------------------------------------*/
+
+static int omap_ep_enable(struct usb_ep *_ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
+	struct omap_udc	*udc;
+	unsigned long	flags;
+	u16		maxp;
+
+	/* catch various bogus parameters */
+	if (!_ep || !desc || ep->desc
+			|| desc->bDescriptorType != USB_DT_ENDPOINT
+			|| ep->bEndpointAddress != desc->bEndpointAddress
+			|| ep->maxpacket < usb_endpoint_maxp(desc)) {
+		DBG("%s, bad ep or descriptor\n", __func__);
+		return -EINVAL;
+	}
+	maxp = usb_endpoint_maxp(desc);
+	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
+				&& maxp != ep->maxpacket)
+			|| usb_endpoint_maxp(desc) > ep->maxpacket
+			|| !desc->wMaxPacketSize) {
+		DBG("%s, bad %s maxpacket\n", __func__, _ep->name);
+		return -ERANGE;
+	}
+
+#ifdef	USE_ISO
+	if ((desc->bmAttributes == USB_ENDPOINT_XFER_ISOC
+				&& desc->bInterval != 1)) {
+		/* hardware wants period = 1; USB allows 2^(Interval-1) */
+		DBG("%s, unsupported ISO period %dms\n", _ep->name,
+				1 << (desc->bInterval - 1));
+		return -EDOM;
+	}
+#else
+	if (desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		DBG("%s, ISO nyet\n", _ep->name);
+		return -EDOM;
+	}
+#endif
+
+	/* xfer types must match, except that interrupt ~= bulk */
+	if (ep->bmAttributes != desc->bmAttributes
+			&& ep->bmAttributes != USB_ENDPOINT_XFER_BULK
+			&& desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
+		DBG("%s, %s type mismatch\n", __func__, _ep->name);
+		return -EINVAL;
+	}
+
+	udc = ep->udc;
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
+		DBG("%s, bogus device state\n", __func__);
+		return -ESHUTDOWN;
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	ep->desc = desc;
+	ep->irqs = 0;
+	ep->stopped = 0;
+	ep->ep.maxpacket = maxp;
+
+	/* set endpoint to initial state */
+	ep->dma_channel = 0;
+	ep->has_dma = 0;
+	ep->lch = -1;
+	use_ep(ep, UDC_EP_SEL);
+	omap_writew(udc->clr_halt, UDC_CTRL);
+	ep->ackwait = 0;
+	deselect_ep();
+
+	if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
+		list_add(&ep->iso, &udc->iso);
+
+	/* maybe assign a DMA channel to this endpoint */
+	if (use_dma && desc->bmAttributes == USB_ENDPOINT_XFER_BULK)
+		/* FIXME ISO can dma, but prefers first channel */
+		dma_channel_claim(ep, 0);
+
+	/* PIO OUT may RX packets */
+	if (desc->bmAttributes != USB_ENDPOINT_XFER_ISOC
+			&& !ep->has_dma
+			&& !(ep->bEndpointAddress & USB_DIR_IN)) {
+		omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+		ep->ackwait = 1 + ep->double_buf;
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	VDBG("%s enabled\n", _ep->name);
+	return 0;
+}
+
+static void nuke(struct omap_ep *, int status);
+
+static int omap_ep_disable(struct usb_ep *_ep)
+{
+	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
+	unsigned long	flags;
+
+	if (!_ep || !ep->desc) {
+		DBG("%s, %s not enabled\n", __func__,
+			_ep ? ep->ep.name : NULL);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+	ep->desc = NULL;
+	ep->ep.desc = NULL;
+	nuke (ep, -ESHUTDOWN);
+	ep->ep.maxpacket = ep->maxpacket;
+	ep->has_dma = 0;
+	omap_writew(UDC_SET_HALT, UDC_CTRL);
+	list_del_init(&ep->iso);
+	del_timer(&ep->timer);
+
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+	VDBG("%s disabled\n", _ep->name);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *
+omap_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+	struct omap_req	*req;
+
+	req = kzalloc(sizeof(*req), gfp_flags);
+	if (req) {
+		req->req.dma = DMA_ADDR_INVALID;
+		INIT_LIST_HEAD (&req->queue);
+	}
+	return &req->req;
+}
+
+static void
+omap_free_request(struct usb_ep *ep, struct usb_request *_req)
+{
+	struct omap_req	*req = container_of(_req, struct omap_req, req);
+
+	if (_req)
+		kfree (req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+done(struct omap_ep *ep, struct omap_req *req, int status)
+{
+	unsigned		stopped = ep->stopped;
+
+	list_del_init(&req->queue);
+
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	if (use_dma && ep->has_dma) {
+		if (req->mapped) {
+			dma_unmap_single(ep->udc->gadget.dev.parent,
+				req->req.dma, req->req.length,
+				(ep->bEndpointAddress & USB_DIR_IN)
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+			req->req.dma = DMA_ADDR_INVALID;
+			req->mapped = 0;
+		} else
+			dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
+				req->req.dma, req->req.length,
+				(ep->bEndpointAddress & USB_DIR_IN)
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+	}
+
+#ifndef	USB_TRACE
+	if (status && status != -ESHUTDOWN)
+#endif
+		VDBG("complete %s req %p stat %d len %u/%u\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+	spin_unlock(&ep->udc->lock);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&ep->udc->lock);
+	ep->stopped = stopped;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define UDC_FIFO_FULL		(UDC_NON_ISO_FIFO_FULL | UDC_ISO_FIFO_FULL)
+#define UDC_FIFO_UNWRITABLE	(UDC_EP_HALTED | UDC_FIFO_FULL)
+
+#define FIFO_EMPTY	(UDC_NON_ISO_FIFO_EMPTY | UDC_ISO_FIFO_EMPTY)
+#define FIFO_UNREADABLE (UDC_EP_HALTED | FIFO_EMPTY)
+
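+/* copy up to "max" bytes of the current request into the FIFO, using
+ * 16-bit writes while the buffer is halfword aligned and byte writes
+ * for the remainder; returns the number of bytes written.
+ */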
+static inline int
+write_packet(u8 *buf, struct omap_req *req, unsigned max)
+{
+	unsigned	len;
+	u16		*wp;
+
+	len = min(req->req.length - req->req.actual, max);
+	req->req.actual += len;
+
+	max = len;
+	if (likely((((int)buf) & 1) == 0)) {
+		wp = (u16 *)buf;
+		while (max >= 2) {
+			omap_writew(*wp++, UDC_DATA);
+			max -= 2;
+		}
+		buf = (u8 *)wp;
+	}
+	while (max--)
+		omap_writeb(*buf++, UDC_DATA);
+	return len;
+}
+
+// FIXME change r/w fifo calling convention
+
+
+// return:  0 = still running, 1 = completed, negative = errno
+static int write_fifo(struct omap_ep *ep, struct omap_req *req)
+{
+	u8		*buf;
+	unsigned	count;
+	int		is_last;
+	u16		ep_stat;
+
+	buf = req->req.buf + req->req.actual;
+	prefetch(buf);
+
+	/* PIO-IN isn't double buffered except for iso */
+	ep_stat = omap_readw(UDC_STAT_FLG);
+	if (ep_stat & UDC_FIFO_UNWRITABLE)
+		return 0;
+
+	count = ep->ep.maxpacket;
+	count = write_packet(buf, req, count);
+	omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+	ep->ackwait = 1;
+
+	/* last packet is often short (sometimes a zlp) */
+	if (count != ep->ep.maxpacket)
+		is_last = 1;
+	else if (req->req.length == req->req.actual
+			&& !req->req.zero)
+		is_last = 1;
+	else
+		is_last = 0;
+
+	/* NOTE:  requests complete when all IN data is in a
+	 * FIFO (or sometimes later, if a zlp was needed).
+	 * Use usb_ep_fifo_status() where needed.
+	 */
+	if (is_last)
+		done(ep, req, 0);
+	return is_last;
+}
+
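+/* mirror of write_packet(): drain up to "avail" bytes from the FIFO into
+ * the current request, using 16-bit reads while the buffer is halfword
+ * aligned and byte reads for the remainder.
+ */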
+static inline int
+read_packet(u8 *buf, struct omap_req *req, unsigned avail)
+{
+	unsigned	len;
+	u16		*wp;
+
+	len = min(req->req.length - req->req.actual, avail);
+	req->req.actual += len;
+	avail = len;
+
+	if (likely((((int)buf) & 1) == 0)) {
+		wp = (u16 *)buf;
+		while (avail >= 2) {
+			*wp++ = omap_readw(UDC_DATA);
+			avail -= 2;
+		}
+		buf = (u8 *)wp;
+	}
+	while (avail--)
+		*buf++ = omap_readb(UDC_DATA);
+	return len;
+}
+
+// return:  0 = still running, 1 = queue empty, negative = errno
+static int read_fifo(struct omap_ep *ep, struct omap_req *req)
+{
+	u8		*buf;
+	unsigned	count, avail;
+	int		is_last;
+
+	buf = req->req.buf + req->req.actual;
+	prefetchw(buf);
+
+	for (;;) {
+		u16	ep_stat = omap_readw(UDC_STAT_FLG);
+
+		is_last = 0;
+		if (ep_stat & FIFO_EMPTY) {
+			if (!ep->double_buf)
+				break;
+			ep->fnf = 1;
+		}
+		if (ep_stat & UDC_EP_HALTED)
+			break;
+
+		if (ep_stat & UDC_FIFO_FULL)
+			avail = ep->ep.maxpacket;
+		else  {
+			avail = omap_readw(UDC_RXFSTAT);
+			ep->fnf = ep->double_buf;
+		}
+		count = read_packet(buf, req, avail);
+
+		/* partial packet reads may not be errors */
+		if (count < ep->ep.maxpacket) {
+			is_last = 1;
+			/* overflowed this request?  flush extra data */
+			if (count != avail) {
+				req->req.status = -EOVERFLOW;
+				avail -= count;
+				while (avail--)
+					omap_readw(UDC_DATA);
+			}
+		} else if (req->req.length == req->req.actual)
+			is_last = 1;
+		else
+			is_last = 0;
+
+		if (!ep->bEndpointAddress)
+			break;
+		if (is_last)
+			done(ep, req, 0);
+		break;
+	}
+	return is_last;
+}
+
+/*-------------------------------------------------------------------------*/
+
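+/* dma_src_len()/dma_dest_len() estimate how many bytes the DMA engine has
+ * actually moved by comparing its current position with the transfer's
+ * start address; only the low 16 bits are significant, so wraparound is
+ * handled explicitly.
+ */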
+static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start)
+{
+	dma_addr_t	end;
+
+	/* IN-DMA needs this on fault/cancel paths, but 15xx misreports
+	 * the last transfer's bytecount by more than a FIFO's worth,
+	 * so don't even try to measure it there.
+	 */
+	if (cpu_is_omap15xx())
+		return 0;
+
+	end = omap_get_dma_src_pos(ep->lch);
+	if (end == ep->dma_counter)
+		return 0;
+
+	end |= start & (0xffff << 16);
+	if (end < start)
+		end += 0x10000;
+	return end - start;
+}
+
+static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start)
+{
+	dma_addr_t	end;
+
+	end = omap_get_dma_dst_pos(ep->lch);
+	if (end == ep->dma_counter)
+		return 0;
+
+	end |= start & (0xffff << 16);
+	if (cpu_is_omap15xx())
+		end++;
+	if (end < start)
+		end += 0x10000;
+	return end - start;
+}
+
+
+/* Each USB transfer request using DMA maps to one or more DMA transfers.
+ * When DMA completion isn't request completion, the UDC continues with
+ * the next DMA transfer for that USB transfer.
+ */
+
+static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
+{
+	u16		txdma_ctrl, w;
+	unsigned	length = req->req.length - req->req.actual;
+	const int	sync_mode = cpu_is_omap15xx()
+				? OMAP_DMA_SYNC_FRAME
+				: OMAP_DMA_SYNC_ELEMENT;
+	int		dma_trigger = 0;
+
+	if (cpu_is_omap24xx())
+		dma_trigger = OMAP24XX_DMA(USB_W2FC_TX0, ep->dma_channel);
+
+	/* measure length in either bytes or packets */
+	if ((cpu_is_omap16xx() && length <= UDC_TXN_TSC)
+			|| (cpu_is_omap24xx() && length < ep->maxpacket)
+			|| (cpu_is_omap15xx() && length < ep->maxpacket)) {
+		txdma_ctrl = UDC_TXN_EOT | length;
+		omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
+				length, 1, sync_mode, dma_trigger, 0);
+	} else {
+		length = min(length / ep->maxpacket,
+				(unsigned) UDC_TXN_TSC + 1);
+		txdma_ctrl = length;
+		omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
+				ep->ep.maxpacket >> 1, length, sync_mode,
+				dma_trigger, 0);
+		length *= ep->maxpacket;
+	}
+	omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF,
+		OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
+		0, 0);
+
+	omap_start_dma(ep->lch);
+	ep->dma_counter = omap_get_dma_src_pos(ep->lch);
+	w = omap_readw(UDC_DMA_IRQ_EN);
+	w |= UDC_TX_DONE_IE(ep->dma_channel);
+	omap_writew(w, UDC_DMA_IRQ_EN);
+	omap_writew(UDC_TXN_START | txdma_ctrl, UDC_TXDMA(ep->dma_channel));
+	req->dma_bytes = length;
+}
+
+static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status)
+{
+	u16 w;
+
+	if (status == 0) {
+		req->req.actual += req->dma_bytes;
+
+		/* return if this request needs to send data or zlp */
+		if (req->req.actual < req->req.length)
+			return;
+		if (req->req.zero
+				&& req->dma_bytes != 0
+				&& (req->req.actual % ep->maxpacket) == 0)
+			return;
+	} else
+		req->req.actual += dma_src_len(ep, req->req.dma
+							+ req->req.actual);
+
+	/* tx completion */
+	omap_stop_dma(ep->lch);
+	w = omap_readw(UDC_DMA_IRQ_EN);
+	w &= ~UDC_TX_DONE_IE(ep->dma_channel);
+	omap_writew(w, UDC_DMA_IRQ_EN);
+	done(ep, req, status);
+}
+
+static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
+{
+	unsigned packets = req->req.length - req->req.actual;
+	int dma_trigger = 0;
+	u16 w;
+
+	if (cpu_is_omap24xx())
+		dma_trigger = OMAP24XX_DMA(USB_W2FC_RX0, ep->dma_channel);
+
+	/* NOTE:  we filtered out "short reads" before, so we know
+	 * the buffer has only whole numbers of packets, except that
+	 * MODE SELECT(6) can send a short 24-byte data stage in
+	 * OMAP24XX DMA mode.
+	 */
+	if (cpu_is_omap24xx() && packets < ep->maxpacket) {
+		omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
+				packets, 1, OMAP_DMA_SYNC_ELEMENT,
+				dma_trigger, 0);
+		req->dma_bytes = packets;
+	} else {
+		/* set up this DMA transfer, enable the fifo, start */
+		packets /= ep->ep.maxpacket;
+		packets = min(packets, (unsigned)UDC_RXN_TC + 1);
+		req->dma_bytes = packets * ep->ep.maxpacket;
+		omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
+				ep->ep.maxpacket >> 1, packets,
+				OMAP_DMA_SYNC_ELEMENT,
+				dma_trigger, 0);
+	}
+	omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF,
+		OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
+		0, 0);
+	ep->dma_counter = omap_get_dma_dst_pos(ep->lch);
+
+	omap_writew(UDC_RXN_STOP | (packets - 1), UDC_RXDMA(ep->dma_channel));
+	w = omap_readw(UDC_DMA_IRQ_EN);
+	w |= UDC_RX_EOT_IE(ep->dma_channel);
+	omap_writew(w, UDC_DMA_IRQ_EN);
+	omap_writew(ep->bEndpointAddress & 0xf, UDC_EP_NUM);
+	omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+
+	omap_start_dma(ep->lch);
+}
+
+static void
+finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status, int one)
+{
+	u16	count, w;
+
+	if (status == 0)
+		ep->dma_counter = (u16) (req->req.dma + req->req.actual);
+	count = dma_dest_len(ep, req->req.dma + req->req.actual);
+	count += req->req.actual;
+	if (one)
+		count--;
+	if (count <= req->req.length)
+		req->req.actual = count;
+
+	if (count != req->dma_bytes || status)
+		omap_stop_dma(ep->lch);
+
+	/* if this wasn't short, request may need another transfer */
+	else if (req->req.actual < req->req.length)
+		return;
+
+	/* rx completion */
+	w = omap_readw(UDC_DMA_IRQ_EN);
+	w &= ~UDC_RX_EOT_IE(ep->dma_channel);
+	omap_writew(w, UDC_DMA_IRQ_EN);
+	done(ep, req, status);
+}
+
+static void dma_irq(struct omap_udc *udc, u16 irq_src)
+{
+	u16		dman_stat = omap_readw(UDC_DMAN_STAT);
+	struct omap_ep	*ep;
+	struct omap_req	*req;
+
+	/* IN dma: tx to host */
+	if (irq_src & UDC_TXN_DONE) {
+		ep = &udc->ep[16 + UDC_DMA_TX_SRC(dman_stat)];
+		ep->irqs++;
+		/* can see TXN_DONE after dma abort */
+		if (!list_empty(&ep->queue)) {
+			req = container_of(ep->queue.next,
+						struct omap_req, queue);
+			finish_in_dma(ep, req, 0);
+		}
+		omap_writew(UDC_TXN_DONE, UDC_IRQ_SRC);
+
+		if (!list_empty (&ep->queue)) {
+			req = container_of(ep->queue.next,
+					struct omap_req, queue);
+			next_in_dma(ep, req);
+		}
+	}
+
+	/* OUT dma: rx from host */
+	if (irq_src & UDC_RXN_EOT) {
+		ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
+		ep->irqs++;
+		/* can see RXN_EOT after dma abort */
+		if (!list_empty(&ep->queue)) {
+			req = container_of(ep->queue.next,
+					struct omap_req, queue);
+			finish_out_dma(ep, req, 0, dman_stat & UDC_DMA_RX_SB);
+		}
+		omap_writew(UDC_RXN_EOT, UDC_IRQ_SRC);
+
+		if (!list_empty (&ep->queue)) {
+			req = container_of(ep->queue.next,
+					struct omap_req, queue);
+			next_out_dma(ep, req);
+		}
+	}
+
+	if (irq_src & UDC_RXN_CNT) {
+		ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
+		ep->irqs++;
+		/* omap15xx does this unasked... */
+		VDBG("%s, RX_CNT irq?\n", ep->ep.name);
+		omap_writew(UDC_RXN_CNT, UDC_IRQ_SRC);
+	}
+}
+
+static void dma_error(int lch, u16 ch_status, void *data)
+{
+	struct omap_ep	*ep = data;
+
+	/* if ch_status & OMAP_DMA_DROP_IRQ ... */
+	/* if ch_status & OMAP1_DMA_TOUT_IRQ ... */
+	ERR("%s dma error, lch %d status %02x\n", ep->ep.name, lch, ch_status);
+
+	/* complete current transfer ... */
+}
+
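+/* bind one of the three UDC DMA channels to this endpoint; a "channel"
+ * argument of 0 (or out of range) means pick any free one, trying channel
+ * 3 first and leaving channel 1 (preferred for ISO) for last.
+ */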
+static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
+{
+	u16	reg;
+	int	status, restart, is_in;
+	int	dma_channel;
+
+	is_in = ep->bEndpointAddress & USB_DIR_IN;
+	if (is_in)
+		reg = omap_readw(UDC_TXDMA_CFG);
+	else
+		reg = omap_readw(UDC_RXDMA_CFG);
+	reg |= UDC_DMA_REQ;		/* "pulse" activated */
+
+	ep->dma_channel = 0;
+	ep->lch = -1;
+	if (channel == 0 || channel > 3) {
+		if ((reg & 0x0f00) == 0)
+			channel = 3;
+		else if ((reg & 0x00f0) == 0)
+			channel = 2;
+		else if ((reg & 0x000f) == 0)	/* preferred for ISO */
+			channel = 1;
+		else {
+			status = -EMLINK;
+			goto just_restart;
+		}
+	}
+	reg |= (0x0f & ep->bEndpointAddress) << (4 * (channel - 1));
+	ep->dma_channel = channel;
+
+	if (is_in) {
+		if (cpu_is_omap24xx())
+			dma_channel = OMAP24XX_DMA(USB_W2FC_TX0, channel);
+		else
+			dma_channel = OMAP_DMA_USB_W2FC_TX0 - 1 + channel;
+		status = omap_request_dma(dma_channel,
+			ep->ep.name, dma_error, ep, &ep->lch);
+		if (status == 0) {
+			omap_writew(reg, UDC_TXDMA_CFG);
+			/* EMIFF or SDRC */
+			omap_set_dma_src_burst_mode(ep->lch,
+						OMAP_DMA_DATA_BURST_4);
+			omap_set_dma_src_data_pack(ep->lch, 1);
+			/* TIPB */
+			omap_set_dma_dest_params(ep->lch,
+				OMAP_DMA_PORT_TIPB,
+				OMAP_DMA_AMODE_CONSTANT,
+				UDC_DATA_DMA,
+				0, 0);
+		}
+	} else {
+		if (cpu_is_omap24xx())
+			dma_channel = OMAP24XX_DMA(USB_W2FC_RX0, channel);
+		else
+			dma_channel = OMAP_DMA_USB_W2FC_RX0 - 1 + channel;
+
+		status = omap_request_dma(dma_channel,
+			ep->ep.name, dma_error, ep, &ep->lch);
+		if (status == 0) {
+			omap_writew(reg, UDC_RXDMA_CFG);
+			/* TIPB */
+			omap_set_dma_src_params(ep->lch,
+				OMAP_DMA_PORT_TIPB,
+				OMAP_DMA_AMODE_CONSTANT,
+				UDC_DATA_DMA,
+				0, 0);
+			/* EMIFF or SDRC */
+			omap_set_dma_dest_burst_mode(ep->lch,
+						OMAP_DMA_DATA_BURST_4);
+			omap_set_dma_dest_data_pack(ep->lch, 1);
+		}
+	}
+	if (status)
+		ep->dma_channel = 0;
+	else {
+		ep->has_dma = 1;
+		omap_disable_dma_irq(ep->lch, OMAP_DMA_BLOCK_IRQ);
+
+		/* channel type P: hw synch (fifo) */
+		if (cpu_class_is_omap1() && !cpu_is_omap15xx())
+			omap_set_dma_channel_mode(ep->lch, OMAP_DMA_LCH_P);
+	}
+
+just_restart:
+	/* restart any queue, even if the claim failed  */
+	restart = !ep->stopped && !list_empty(&ep->queue);
+
+	if (status)
+		DBG("%s no dma channel: %d%s\n", ep->ep.name, status,
+			restart ? " (restart)" : "");
+	else
+		DBG("%s claimed %cxdma%d lch %d%s\n", ep->ep.name,
+			is_in ? 't' : 'r',
+			ep->dma_channel - 1, ep->lch,
+			restart ? " (restart)" : "");
+
+	if (restart) {
+		struct omap_req	*req;
+		req = container_of(ep->queue.next, struct omap_req, queue);
+		if (ep->has_dma)
+			(is_in ? next_in_dma : next_out_dma)(ep, req);
+		else {
+			use_ep(ep, UDC_EP_SEL);
+			(is_in ? write_fifo : read_fifo)(ep, req);
+			deselect_ep();
+			if (!is_in) {
+				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+				ep->ackwait = 1 + ep->double_buf;
+			}
+			/* IN: 6 wait states before it'll tx */
+		}
+	}
+}
+
+static void dma_channel_release(struct omap_ep *ep)
+{
+	int		shift = 4 * (ep->dma_channel - 1);
+	u16		mask = 0x0f << shift;
+	struct omap_req	*req;
+	int		active;
+
+	/* abort any active usb transfer request */
+	if (!list_empty(&ep->queue))
+		req = container_of(ep->queue.next, struct omap_req, queue);
+	else
+		req = NULL;
+
+	active = omap_get_dma_active_status(ep->lch);
+
+	DBG("%s release %s %cxdma%d %p\n", ep->ep.name,
+			active ? "active" : "idle",
+			(ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
+			ep->dma_channel - 1, req);
+
+	/* NOTE: re-setting RX_REQ/TX_REQ because of a chip bug (before
+	 * OMAP 1710 ES2.0) where reading the DMA_CFG can clear them.
+	 */
+
+	/* wait till current packet DMA finishes, and fifo empties */
+	if (ep->bEndpointAddress & USB_DIR_IN) {
+		omap_writew((omap_readw(UDC_TXDMA_CFG) & ~mask) | UDC_DMA_REQ,
+					UDC_TXDMA_CFG);
+
+		if (req) {
+			finish_in_dma(ep, req, -ECONNRESET);
+
+			/* clear FIFO; hosts probably won't empty it */
+			use_ep(ep, UDC_EP_SEL);
+			omap_writew(UDC_CLR_EP, UDC_CTRL);
+			deselect_ep();
+		}
+		while (omap_readw(UDC_TXDMA_CFG) & mask)
+			udelay(10);
+	} else {
+		omap_writew((omap_readw(UDC_RXDMA_CFG) & ~mask) | UDC_DMA_REQ,
+					UDC_RXDMA_CFG);
+
+		/* dma empties the fifo */
+		while (omap_readw(UDC_RXDMA_CFG) & mask)
+			udelay(10);
+		if (req)
+			finish_out_dma(ep, req, -ECONNRESET, 0);
+	}
+	omap_free_dma(ep->lch);
+	ep->dma_channel = 0;
+	ep->lch = -1;
+	/* has_dma still set, till endpoint is fully quiesced */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int
+omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
+	struct omap_req	*req = container_of(_req, struct omap_req, req);
+	struct omap_udc	*udc;
+	unsigned long	flags;
+	int		is_iso = 0;
+
+	/* catch various bogus parameters */
+	if (!_req || !req->req.complete || !req->req.buf
+			|| !list_empty(&req->queue)) {
+		DBG("%s, bad params\n", __func__);
+		return -EINVAL;
+	}
+	if (!_ep || (!ep->desc && ep->bEndpointAddress)) {
+		DBG("%s, bad ep\n", __func__);
+		return -EINVAL;
+	}
+	if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		if (req->req.length > ep->ep.maxpacket)
+			return -EMSGSIZE;
+		is_iso = 1;
+	}
+
+	/* this isn't bogus, but OMAP DMA isn't the only hardware to
+	 * have a hard time with partial packet reads...  reject it.
+	 * Except OMAP2 can handle the small packets.
+	 */
+	if (use_dma
+			&& ep->has_dma
+			&& ep->bEndpointAddress != 0
+			&& (ep->bEndpointAddress & USB_DIR_IN) == 0
+			&& !cpu_class_is_omap2()
+			&& (req->req.length % ep->ep.maxpacket) != 0) {
+		DBG("%s, no partial packet OUT reads\n", __func__);
+		return -EMSGSIZE;
+	}
+
+	udc = ep->udc;
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	if (use_dma && ep->has_dma) {
+		if (req->req.dma == DMA_ADDR_INVALID) {
+			req->req.dma = dma_map_single(
+				ep->udc->gadget.dev.parent,
+				req->req.buf,
+				req->req.length,
+				(ep->bEndpointAddress & USB_DIR_IN)
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+			req->mapped = 1;
+		} else {
+			dma_sync_single_for_device(
+				ep->udc->gadget.dev.parent,
+				req->req.dma, req->req.length,
+				(ep->bEndpointAddress & USB_DIR_IN)
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+			req->mapped = 0;
+		}
+	}
+
+	VDBG("%s queue req %p, len %d buf %p\n",
+		ep->ep.name, _req, _req->length, _req->buf);
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+
+	/* maybe kickstart non-iso i/o queues */
+	if (is_iso) {
+		u16 w;
+
+		w = omap_readw(UDC_IRQ_EN);
+		w |= UDC_SOF_IE;
+		omap_writew(w, UDC_IRQ_EN);
+	} else if (list_empty(&ep->queue) && !ep->stopped && !ep->ackwait) {
+		int	is_in;
+
+		if (ep->bEndpointAddress == 0) {
+			if (!udc->ep0_pending || !list_empty (&ep->queue)) {
+				spin_unlock_irqrestore(&udc->lock, flags);
+				return -EL2HLT;
+			}
+
+			/* empty DATA stage? */
+			is_in = udc->ep0_in;
+			if (!req->req.length) {
+
+				/* chip became CONFIGURED or ADDRESSED
+				 * earlier; drivers may already have queued
+				 * requests to non-control endpoints
+				 */
+				if (udc->ep0_set_config) {
+					u16	irq_en = omap_readw(UDC_IRQ_EN);
+
+					irq_en |= UDC_DS_CHG_IE | UDC_EP0_IE;
+					if (!udc->ep0_reset_config)
+						irq_en |= UDC_EPN_RX_IE
+							| UDC_EPN_TX_IE;
+					omap_writew(irq_en, UDC_IRQ_EN);
+				}
+
+				/* STATUS for zero length DATA stages is
+				 * always an IN ... even for IN transfers,
+				 * a weird case which seems to stall OMAP.
+				 */
+				omap_writew(UDC_EP_SEL | UDC_EP_DIR, UDC_EP_NUM);
+				omap_writew(UDC_CLR_EP, UDC_CTRL);
+				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+				omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+
+				/* cleanup */
+				udc->ep0_pending = 0;
+				done(ep, req, 0);
+				req = NULL;
+
+			/* non-empty DATA stage */
+			} else if (is_in) {
+				omap_writew(UDC_EP_SEL | UDC_EP_DIR, UDC_EP_NUM);
+			} else {
+				if (udc->ep0_setup)
+					goto irq_wait;
+				omap_writew(UDC_EP_SEL, UDC_EP_NUM);
+			}
+		} else {
+			is_in = ep->bEndpointAddress & USB_DIR_IN;
+			if (!ep->has_dma)
+				use_ep(ep, UDC_EP_SEL);
+			/* if ISO: SOF IRQs must be enabled/disabled! */
+		}
+
+		if (ep->has_dma)
+			(is_in ? next_in_dma : next_out_dma)(ep, req);
+		else if (req) {
+			if ((is_in ? write_fifo : read_fifo)(ep, req) == 1)
+				req = NULL;
+			deselect_ep();
+			if (!is_in) {
+				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+				ep->ackwait = 1 + ep->double_buf;
+			}
+			/* IN: 6 wait states before it'll tx */
+		}
+	}
+
+irq_wait:
+	/* irq handler advances the queue */
+	if (req != NULL)
+		list_add_tail(&req->queue, &ep->queue);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
+static int omap_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
+	struct omap_req	*req;
+	unsigned long	flags;
+
+	if (!_ep || !_req)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry (req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		spin_unlock_irqrestore(&ep->udc->lock, flags);
+		return -EINVAL;
+	}
+
+	if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) {
+		int channel = ep->dma_channel;
+
+		/* releasing the channel cancels the request,
+		 * reclaiming the channel restarts the queue
+		 */
+		dma_channel_release(ep);
+		dma_channel_claim(ep, channel);
+	} else
+		done(ep, req, -ECONNRESET);
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int omap_ep_set_halt(struct usb_ep *_ep, int value)
+{
+	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
+	unsigned long	flags;
+	int		status = -EOPNOTSUPP;
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+
+	/* just use protocol stalls for ep0; real halts are annoying */
+	if (ep->bEndpointAddress == 0) {
+		if (!ep->udc->ep0_pending)
+			status = -EINVAL;
+		else if (value) {
+			if (ep->udc->ep0_set_config) {
+				WARNING("error changing config?\n");
+				omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
+			}
+			omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
+			ep->udc->ep0_pending = 0;
+			status = 0;
+		} else /* NOP */
+			status = 0;
+
+	/* otherwise, all active non-ISO endpoints can halt */
+	} else if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC && ep->desc) {
+
+		/* IN endpoints must already be idle */
+		if ((ep->bEndpointAddress & USB_DIR_IN)
+				&& !list_empty(&ep->queue)) {
+			status = -EAGAIN;
+			goto done;
+		}
+
+		if (value) {
+			int	channel;
+
+			if (use_dma && ep->dma_channel
+					&& !list_empty(&ep->queue)) {
+				channel = ep->dma_channel;
+				dma_channel_release(ep);
+			} else
+				channel = 0;
+
+			use_ep(ep, UDC_EP_SEL);
+			if (omap_readw(UDC_STAT_FLG) & UDC_NON_ISO_FIFO_EMPTY) {
+				omap_writew(UDC_SET_HALT, UDC_CTRL);
+				status = 0;
+			} else
+				status = -EAGAIN;
+			deselect_ep();
+
+			if (channel)
+				dma_channel_claim(ep, channel);
+		} else {
+			use_ep(ep, 0);
+			omap_writew(ep->udc->clr_halt, UDC_CTRL);
+			ep->ackwait = 0;
+			if (!(ep->bEndpointAddress & USB_DIR_IN)) {
+				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+				ep->ackwait = 1 + ep->double_buf;
+			}
+		}
+	}
+done:
+	VDBG("%s %s halt stat %d\n", ep->ep.name,
+		value ? "set" : "clear", status);
+
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+	return status;
+}
+
+static struct usb_ep_ops omap_ep_ops = {
+	.enable		= omap_ep_enable,
+	.disable	= omap_ep_disable,
+
+	.alloc_request	= omap_alloc_request,
+	.free_request	= omap_free_request,
+
+	.queue		= omap_ep_queue,
+	.dequeue	= omap_ep_dequeue,
+
+	.set_halt	= omap_ep_set_halt,
+	// fifo_status ... report bytes in fifo
+	// fifo_flush ... flush fifo
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int omap_get_frame(struct usb_gadget *gadget)
+{
+	u16	sof = omap_readw(UDC_SOF);
+	return (sof & UDC_TS_OK) ? (sof & UDC_TS) : -EL2NSYNC;
+}
+
+static int omap_wakeup(struct usb_gadget *gadget)
+{
+	struct omap_udc	*udc;
+	unsigned long	flags;
+	int		retval = -EHOSTUNREACH;
+
+	udc = container_of(gadget, struct omap_udc, gadget);
+
+	spin_lock_irqsave(&udc->lock, flags);
+	if (udc->devstat & UDC_SUS) {
+		/* NOTE:  OTG spec erratum says that OTG devices may
+		 * issue wakeups without host enable.
+		 */
+		if (udc->devstat & (UDC_B_HNP_ENABLE|UDC_R_WK_OK)) {
+			DBG("remote wakeup...\n");
+			omap_writew(UDC_RMT_WKP, UDC_SYSCON2);
+			retval = 0;
+		}
+
+	/* NOTE:  non-OTG systems may use SRP TOO... */
+	} else if (!(udc->devstat & UDC_ATT)) {
+		if (udc->transceiver)
+			retval = otg_start_srp(udc->transceiver->otg);
+	}
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return retval;
+}
+
+static int
+omap_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
+{
+	struct omap_udc	*udc;
+	unsigned long	flags;
+	u16		syscon1;
+
+	udc = container_of(gadget, struct omap_udc, gadget);
+	spin_lock_irqsave(&udc->lock, flags);
+	syscon1 = omap_readw(UDC_SYSCON1);
+	if (is_selfpowered)
+		syscon1 |= UDC_SELF_PWR;
+	else
+		syscon1 &= ~UDC_SELF_PWR;
+	omap_writew(syscon1, UDC_SYSCON1);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
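+/* the D+ pullup is driven only while a gadget driver is bound, software
+ * connect is enabled, and a VBUS session is active.
+ */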
+static int can_pullup(struct omap_udc *udc)
+{
+	return udc->driver && udc->softconnect && udc->vbus_active;
+}
+
+static void pullup_enable(struct omap_udc *udc)
+{
+	u16 w;
+
+	w = omap_readw(UDC_SYSCON1);
+	w |= UDC_PULLUP_EN;
+	omap_writew(w, UDC_SYSCON1);
+	if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
+		u32 l;
+
+		l = omap_readl(OTG_CTRL);
+		l |= OTG_BSESSVLD;
+		omap_writel(l, OTG_CTRL);
+	}
+	omap_writew(UDC_DS_CHG_IE, UDC_IRQ_EN);
+}
+
+static void pullup_disable(struct omap_udc *udc)
+{
+	u16 w;
+
+	if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
+		u32 l;
+
+		l = omap_readl(OTG_CTRL);
+		l &= ~OTG_BSESSVLD;
+		omap_writel(l, OTG_CTRL);
+	}
+	omap_writew(UDC_DS_CHG_IE, UDC_IRQ_EN);
+	w = omap_readw(UDC_SYSCON1);
+	w &= ~UDC_PULLUP_EN;
+	omap_writew(w, UDC_SYSCON1);
+}
+
+static struct omap_udc *udc;
+
+static void omap_udc_enable_clock(int enable)
+{
+	if (udc == NULL || udc->dc_clk == NULL || udc->hhc_clk == NULL)
+		return;
+
+	if (enable) {
+		clk_enable(udc->dc_clk);
+		clk_enable(udc->hhc_clk);
+		udelay(100);
+	} else {
+		clk_disable(udc->hhc_clk);
+		clk_disable(udc->dc_clk);
+	}
+}
+
+/*
+ * Called by whatever detects VBUS sessions:  external transceiver
+ * driver, or maybe GPIO0 VBUS IRQ.  May request 48 MHz clock.
+ */
+static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	struct omap_udc	*udc;
+	unsigned long	flags;
+	u32 l;
+
+	udc = container_of(gadget, struct omap_udc, gadget);
+	spin_lock_irqsave(&udc->lock, flags);
+	VDBG("VBUS %s\n", is_active ? "on" : "off");
+	udc->vbus_active = (is_active != 0);
+	if (cpu_is_omap15xx()) {
+		/* "software" detect, ignored if !VBUS_MODE_1510 */
+		l = omap_readl(FUNC_MUX_CTRL_0);
+		if (is_active)
+			l |= VBUS_CTRL_1510;
+		else
+			l &= ~VBUS_CTRL_1510;
+		omap_writel(l, FUNC_MUX_CTRL_0);
+	}
+	if (udc->dc_clk != NULL && is_active) {
+		if (!udc->clk_requested) {
+			omap_udc_enable_clock(1);
+			udc->clk_requested = 1;
+		}
+	}
+	if (can_pullup(udc))
+		pullup_enable(udc);
+	else
+		pullup_disable(udc);
+	if (udc->dc_clk != NULL && !is_active) {
+		if (udc->clk_requested) {
+			omap_udc_enable_clock(0);
+			udc->clk_requested = 0;
+		}
+	}
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+static int omap_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+	struct omap_udc	*udc;
+
+	udc = container_of(gadget, struct omap_udc, gadget);
+	if (udc->transceiver)
+		return usb_phy_set_power(udc->transceiver, mA);
+	return -EOPNOTSUPP;
+}
+
+static int omap_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct omap_udc	*udc;
+	unsigned long	flags;
+
+	udc = container_of(gadget, struct omap_udc, gadget);
+	spin_lock_irqsave(&udc->lock, flags);
+	udc->softconnect = (is_on != 0);
+	if (can_pullup(udc))
+		pullup_enable(udc);
+	else
+		pullup_disable(udc);
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+static int omap_udc_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *));
+static int omap_udc_stop(struct usb_gadget_driver *driver);
+
+static struct usb_gadget_ops omap_gadget_ops = {
+	.get_frame		= omap_get_frame,
+	.wakeup			= omap_wakeup,
+	.set_selfpowered	= omap_set_selfpowered,
+	.vbus_session		= omap_vbus_session,
+	.vbus_draw		= omap_vbus_draw,
+	.pullup			= omap_pullup,
+	.start			= omap_udc_start,
+	.stop			= omap_udc_stop,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* dequeue ALL requests; caller holds udc->lock */
+static void nuke(struct omap_ep *ep, int status)
+{
+	struct omap_req	*req;
+
+	ep->stopped = 1;
+
+	if (use_dma && ep->dma_channel)
+		dma_channel_release(ep);
+
+	use_ep(ep, 0);
+	omap_writew(UDC_CLR_EP, UDC_CTRL);
+	if (ep->bEndpointAddress && ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
+		omap_writew(UDC_SET_HALT, UDC_CTRL);
+
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct omap_req, queue);
+		done(ep, req, status);
+	}
+}
+
+/* caller holds udc->lock */
+static void udc_quiesce(struct omap_udc *udc)
+{
+	struct omap_ep	*ep;
+
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	nuke(&udc->ep[0], -ESHUTDOWN);
+	list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list)
+		nuke(ep, -ESHUTDOWN);
+}
+
+/*-------------------------------------------------------------------------*/
+
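+/* Mirror the HNP bits from DEVSTAT into the gadget and, when the host has
+ * enabled HNP, pre-arm the OTG controller before suspend can trigger it.
+ */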
+static void update_otg(struct omap_udc *udc)
+{
+	u16	devstat;
+
+	if (!gadget_is_otg(&udc->gadget))
+		return;
+
+	if (omap_readl(OTG_CTRL) & OTG_ID)
+		devstat = omap_readw(UDC_DEVSTAT);
+	else
+		devstat = 0;
+
+	udc->gadget.b_hnp_enable = !!(devstat & UDC_B_HNP_ENABLE);
+	udc->gadget.a_hnp_support = !!(devstat & UDC_A_HNP_SUPPORT);
+	udc->gadget.a_alt_hnp_support = !!(devstat & UDC_A_ALT_HNP_SUPPORT);
+
+	/* Enable HNP early, avoiding races on suspend irq path.
+	 * ASSUMES OTG state machine B_BUS_REQ input is true.
+	 */
+	if (udc->gadget.b_hnp_enable) {
+		u32 l;
+
+		l = omap_readl(OTG_CTRL);
+		l |= OTG_B_HNPEN | OTG_B_BUSREQ;
+		l &= ~OTG_PULLUP;
+		omap_writel(l, OTG_CTRL);
+	}
+}
+
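+/* Service ep0 interrupts:  flush stale state if a new SETUP arrived, handle
+ * IN/OUT completions for the current control transfer, then decode and
+ * dispatch the SETUP packet itself.
+ */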
+static void ep0_irq(struct omap_udc *udc, u16 irq_src)
+{
+	struct omap_ep	*ep0 = &udc->ep[0];
+	struct omap_req	*req = NULL;
+
+	ep0->irqs++;
+
+	/* Clear any pending requests and then scrub any rx/tx state
+	 * before starting to handle the SETUP request.
+	 */
+	if (irq_src & UDC_SETUP) {
+		u16	ack = irq_src & (UDC_EP0_TX|UDC_EP0_RX);
+
+		nuke(ep0, 0);
+		if (ack) {
+			omap_writew(ack, UDC_IRQ_SRC);
+			irq_src = UDC_SETUP;
+		}
+	}
+
+	/* IN/OUT packets mean we're in the DATA or STATUS stage.
+	 * This driver only uses protocol stalls (ep0 never halts),
+	 * and if we got this far the gadget driver already had a
+	 * chance to stall.  Tries to be forgiving of host oddities.
+	 *
+	 * NOTE:  the last chance gadget drivers have to stall control
+	 * requests is during their request completion callback.
+	 */
+	if (!list_empty(&ep0->queue))
+		req = container_of(ep0->queue.next, struct omap_req, queue);
+
+	/* IN == TX to host */
+	if (irq_src & UDC_EP0_TX) {
+		int	stat;
+
+		omap_writew(UDC_EP0_TX, UDC_IRQ_SRC);
+		omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
+		stat = omap_readw(UDC_STAT_FLG);
+		if (stat & UDC_ACK) {
+			if (udc->ep0_in) {
+				/* write next IN packet from response,
+				 * or set up the status stage.
+				 */
+				if (req)
+					stat = write_fifo(ep0, req);
+				omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+				if (!req && udc->ep0_pending) {
+					omap_writew(UDC_EP_SEL, UDC_EP_NUM);
+					omap_writew(UDC_CLR_EP, UDC_CTRL);
+					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+					omap_writew(0, UDC_EP_NUM);
+					udc->ep0_pending = 0;
+				} /* else:  6 wait states before it'll tx */
+			} else {
+				/* ack status stage of OUT transfer */
+				omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+				if (req)
+					done(ep0, req, 0);
+			}
+			req = NULL;
+		} else if (stat & UDC_STALL) {
+			omap_writew(UDC_CLR_HALT, UDC_CTRL);
+			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+		} else {
+			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+		}
+	}
+
+	/* OUT == RX from host */
+	if (irq_src & UDC_EP0_RX) {
+		int	stat;
+
+		omap_writew(UDC_EP0_RX, UDC_IRQ_SRC);
+		omap_writew(UDC_EP_SEL, UDC_EP_NUM);
+		stat = omap_readw(UDC_STAT_FLG);
+		if (stat & UDC_ACK) {
+			if (!udc->ep0_in) {
+				stat = 0;
+				/* read next OUT packet of request, maybe
+				 * reactivating the fifo; stall on errors.
+				 */
+				if (!req || (stat = read_fifo(ep0, req)) < 0) {
+					omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
+					udc->ep0_pending = 0;
+					stat = 0;
+				} else if (stat == 0)
+					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+				omap_writew(0, UDC_EP_NUM);
+
+				/* activate status stage */
+				if (stat == 1) {
+					done(ep0, req, 0);
+					/* that may have STALLed ep0... */
+					omap_writew(UDC_EP_SEL | UDC_EP_DIR,
+							UDC_EP_NUM);
+					omap_writew(UDC_CLR_EP, UDC_CTRL);
+					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+					omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+					udc->ep0_pending = 0;
+				}
+			} else {
+				/* ack status stage of IN transfer */
+				omap_writew(0, UDC_EP_NUM);
+				if (req)
+					done(ep0, req, 0);
+			}
+		} else if (stat & UDC_STALL) {
+			omap_writew(UDC_CLR_HALT, UDC_CTRL);
+			omap_writew(0, UDC_EP_NUM);
+		} else {
+			omap_writew(0, UDC_EP_NUM);
+		}
+	}
+
+	/* SETUP starts all control transfers */
+	if (irq_src & UDC_SETUP) {
+		union u {
+			u16			word[4];
+			struct usb_ctrlrequest	r;
+		} u;
+		int			status = -EINVAL;
+		struct omap_ep		*ep;
+
+		/* read the (latest) SETUP message */
+		do {
+			omap_writew(UDC_SETUP_SEL, UDC_EP_NUM);
+			/* two bytes at a time */
+			u.word[0] = omap_readw(UDC_DATA);
+			u.word[1] = omap_readw(UDC_DATA);
+			u.word[2] = omap_readw(UDC_DATA);
+			u.word[3] = omap_readw(UDC_DATA);
+			omap_writew(0, UDC_EP_NUM);
+		} while (omap_readw(UDC_IRQ_SRC) & UDC_SETUP);
+
+#define	w_value		le16_to_cpu(u.r.wValue)
+#define	w_index		le16_to_cpu(u.r.wIndex)
+#define	w_length	le16_to_cpu(u.r.wLength)
+
+		/* Delegate almost all control requests to the gadget driver,
+		 * except for a handful of ch9 status/feature requests that
+		 * hardware doesn't autodecode _and_ the gadget API hides.
+		 */
+		udc->ep0_in = (u.r.bRequestType & USB_DIR_IN) != 0;
+		udc->ep0_set_config = 0;
+		udc->ep0_pending = 1;
+		ep0->stopped = 0;
+		ep0->ackwait = 0;
+		switch (u.r.bRequest) {
+		case USB_REQ_SET_CONFIGURATION:
+			/* udc needs to know when ep != 0 is valid */
+			if (u.r.bRequestType != USB_RECIP_DEVICE)
+				goto delegate;
+			if (w_length != 0)
+				goto do_stall;
+			udc->ep0_set_config = 1;
+			udc->ep0_reset_config = (w_value == 0);
+			VDBG("set config %d\n", w_value);
+
+			/* update udc NOW since gadget driver may start
+			 * queueing requests immediately; clear config
+			 * later if it fails the request.
+			 */
+			if (udc->ep0_reset_config)
+				omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
+			else
+				omap_writew(UDC_DEV_CFG, UDC_SYSCON2);
+			update_otg(udc);
+			goto delegate;
+		case USB_REQ_CLEAR_FEATURE:
+			/* clear endpoint halt */
+			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+				goto delegate;
+			if (w_value != USB_ENDPOINT_HALT
+					|| w_length != 0)
+				goto do_stall;
+			ep = &udc->ep[w_index & 0xf];
+			if (ep != ep0) {
+				if (w_index & USB_DIR_IN)
+					ep += 16;
+				if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
+						|| !ep->desc)
+					goto do_stall;
+				use_ep(ep, 0);
+				omap_writew(udc->clr_halt, UDC_CTRL);
+				ep->ackwait = 0;
+				if (!(ep->bEndpointAddress & USB_DIR_IN)) {
+					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+					ep->ackwait = 1 + ep->double_buf;
+				}
+				/* NOTE:  assumes the host behaves sanely,
+				 * only clearing real halts.  Else we may
+				 * need to kill pending transfers and then
+				 * restart the queue... very messy for DMA!
+				 */
+			}
+			VDBG("%s halt cleared by host\n", ep->name);
+			goto ep0out_status_stage;
+		case USB_REQ_SET_FEATURE:
+			/* set endpoint halt */
+			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+				goto delegate;
+			if (w_value != USB_ENDPOINT_HALT
+					|| w_length != 0)
+				goto do_stall;
+			ep = &udc->ep[w_index & 0xf];
+			if (w_index & USB_DIR_IN)
+				ep += 16;
+			if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
+					|| ep == ep0 || !ep->desc)
+				goto do_stall;
+			if (use_dma && ep->has_dma) {
+				/* this has rude side-effects (aborts) and
+				 * can't really work if DMA-IN is active
+				 */
+				DBG("%s host set_halt, NYET \n", ep->name);
+				goto do_stall;
+			}
+			use_ep(ep, 0);
+			/* can't halt if fifo isn't empty... */
+			omap_writew(UDC_CLR_EP, UDC_CTRL);
+			omap_writew(UDC_SET_HALT, UDC_CTRL);
+			VDBG("%s halted by host\n", ep->name);
+ep0out_status_stage:
+			status = 0;
+			omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
+			omap_writew(UDC_CLR_EP, UDC_CTRL);
+			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+			udc->ep0_pending = 0;
+			break;
+		case USB_REQ_GET_STATUS:
+			/* USB_ENDPOINT_HALT status? */
+			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
+				goto intf_status;
+
+			/* ep0 never stalls */
+			if (!(w_index & 0xf))
+				goto zero_status;
+
+			/* only active endpoints count */
+			ep = &udc->ep[w_index & 0xf];
+			if (w_index & USB_DIR_IN)
+				ep += 16;
+			if (!ep->desc)
+				goto do_stall;
+
+			/* iso never stalls */
+			if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
+				goto zero_status;
+
+			/* FIXME don't assume non-halted endpoints!! */
+			ERR("%s status, can't report\n", ep->ep.name);
+			goto do_stall;
+
+intf_status:
+			/* return interface status.  if we were pedantic,
+			 * we'd detect non-existent interfaces, and stall.
+			 */
+			if (u.r.bRequestType
+					!= (USB_DIR_IN|USB_RECIP_INTERFACE))
+				goto delegate;
+
+zero_status:
+			/* return two zero bytes */
+			omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
+			omap_writew(0, UDC_DATA);
+			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+			status = 0;
+			VDBG("GET_STATUS, interface %d\n", w_index);
+			/* next, status stage */
+			break;
+		default:
+delegate:
+			/* activate the ep0out fifo right away */
+			if (!udc->ep0_in && w_length) {
+				omap_writew(0, UDC_EP_NUM);
+				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+			}
+
+			/* gadget drivers see class/vendor specific requests,
+			 * {SET,GET}_{INTERFACE,DESCRIPTOR,CONFIGURATION},
+			 * and more
+			 */
+			VDBG("SETUP %02x.%02x v%04x i%04x l%04x\n",
+				u.r.bRequestType, u.r.bRequest,
+				w_value, w_index, w_length);
+
+#undef	w_value
+#undef	w_index
+#undef	w_length
+
+			/* The gadget driver may return an error here,
+			 * causing an immediate protocol stall.
+			 *
+			 * Else it must issue a response, either queueing a
+			 * response buffer for the DATA stage, or halting ep0
+			 * (causing a protocol stall, not a real halt).  A
+			 * zero length buffer means no DATA stage.
+			 *
+			 * It's fine to issue that response after the setup()
+			 * call returns and this IRQ has been handled.
+			 */
+			udc->ep0_setup = 1;
+			spin_unlock(&udc->lock);
+			status = udc->driver->setup (&udc->gadget, &u.r);
+			spin_lock(&udc->lock);
+			udc->ep0_setup = 0;
+		}
+
+		if (status < 0) {
+do_stall:
+			VDBG("req %02x.%02x protocol STALL; stat %d\n",
+					u.r.bRequestType, u.r.bRequest, status);
+			if (udc->ep0_set_config) {
+				if (udc->ep0_reset_config)
+					WARNING("error resetting config?\n");
+				else
+					omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
+			}
+			omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
+			udc->ep0_pending = 0;
+		}
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define OTG_FLAGS (UDC_B_HNP_ENABLE|UDC_A_HNP_SUPPORT|UDC_A_ALT_HNP_SUPPORT)
+
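+/* Handle a device state change:  diff the new DEVSTAT against the cached
+ * copy and react to attach/detach, USB reset, suspend/resume, and OTG
+ * flag transitions.
+ */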
+static void devstate_irq(struct omap_udc *udc, u16 irq_src)
+{
+	u16	devstat, change;
+
+	devstat = omap_readw(UDC_DEVSTAT);
+	change = devstat ^ udc->devstat;
+	udc->devstat = devstat;
+
+	if (change & (UDC_USB_RESET|UDC_ATT)) {
+		udc_quiesce(udc);
+
+		if (change & UDC_ATT) {
+			/* driver for any external transceiver will
+			 * have called omap_vbus_session() already
+			 */
+			if (devstat & UDC_ATT) {
+				udc->gadget.speed = USB_SPEED_FULL;
+				VDBG("connect\n");
+				if (!udc->transceiver)
+					pullup_enable(udc);
+				// if (driver->connect) call it
+			} else if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+				udc->gadget.speed = USB_SPEED_UNKNOWN;
+				if (!udc->transceiver)
+					pullup_disable(udc);
+				DBG("disconnect, gadget %s\n",
+					udc->driver->driver.name);
+				if (udc->driver->disconnect) {
+					spin_unlock(&udc->lock);
+					udc->driver->disconnect(&udc->gadget);
+					spin_lock(&udc->lock);
+				}
+			}
+			change &= ~UDC_ATT;
+		}
+
+		if (change & UDC_USB_RESET) {
+			if (devstat & UDC_USB_RESET) {
+				VDBG("RESET=1\n");
+			} else {
+				udc->gadget.speed = USB_SPEED_FULL;
+				INFO("USB reset done, gadget %s\n",
+					udc->driver->driver.name);
+				/* ep0 traffic is legal from now on */
+				omap_writew(UDC_DS_CHG_IE | UDC_EP0_IE,
+						UDC_IRQ_EN);
+			}
+			change &= ~UDC_USB_RESET;
+		}
+	}
+	if (change & UDC_SUS) {
+		if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+			// FIXME tell isp1301 to suspend/resume (?)
+			if (devstat & UDC_SUS) {
+				VDBG("suspend\n");
+				update_otg(udc);
+				/* HNP could be under way already */
+				if (udc->gadget.speed == USB_SPEED_FULL
+						&& udc->driver->suspend) {
+					spin_unlock(&udc->lock);
+					udc->driver->suspend(&udc->gadget);
+					spin_lock(&udc->lock);
+				}
+				if (udc->transceiver)
+					usb_phy_set_suspend(
+							udc->transceiver, 1);
+			} else {
+				VDBG("resume\n");
+				if (udc->transceiver)
+					usb_phy_set_suspend(
+							udc->transceiver, 0);
+				if (udc->gadget.speed == USB_SPEED_FULL
+						&& udc->driver->resume) {
+					spin_unlock(&udc->lock);
+					udc->driver->resume(&udc->gadget);
+					spin_lock(&udc->lock);
+				}
+			}
+		}
+		change &= ~UDC_SUS;
+	}
+	if (!cpu_is_omap15xx() && (change & OTG_FLAGS)) {
+		update_otg(udc);
+		change &= ~OTG_FLAGS;
+	}
+
+	change &= ~(UDC_CFG|UDC_DEF|UDC_ADD);
+	if (change)
+		VDBG("devstat %03x, ignore change %03x\n",
+			devstat,  change);
+
+	omap_writew(UDC_DS_CHG, UDC_IRQ_SRC);
+}
+
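+/* Main UDC interrupt:  demultiplex device state changes, ep0 traffic, and
+ * DMA completions reported through UDC_IRQ_SRC.
+ */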
+static irqreturn_t omap_udc_irq(int irq, void *_udc)
+{
+	struct omap_udc	*udc = _udc;
+	u16		irq_src;
+	irqreturn_t	status = IRQ_NONE;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	irq_src = omap_readw(UDC_IRQ_SRC);
+
+	/* Device state change (usb ch9 stuff) */
+	if (irq_src & UDC_DS_CHG) {
+		devstate_irq(_udc, irq_src);
+		status = IRQ_HANDLED;
+		irq_src &= ~UDC_DS_CHG;
+	}
+
+	/* EP0 control transfers */
+	if (irq_src & (UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX)) {
+		ep0_irq(_udc, irq_src);
+		status = IRQ_HANDLED;
+		irq_src &= ~(UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX);
+	}
+
+	/* DMA transfer completion */
+	if (use_dma && (irq_src & (UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT))) {
+		dma_irq(_udc, irq_src);
+		status = IRQ_HANDLED;
+		irq_src &= ~(UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT);
+	}
+
+	irq_src &= ~(UDC_IRQ_SOF | UDC_EPN_TX|UDC_EPN_RX);
+	if (irq_src)
+		DBG("udc_irq, unhandled %03x\n", irq_src);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return status;
+}
+
+/* workaround for seemingly-lost IRQs for RX ACKs... */
+#define PIO_OUT_TIMEOUT	(jiffies + HZ/3)
+#define HALF_FULL(f)	(!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))
+
+static void pio_out_timer(unsigned long _ep)
+{
+	struct omap_ep	*ep = (void *) _ep;
+	unsigned long	flags;
+	u16		stat_flg;
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+	if (!list_empty(&ep->queue) && ep->ackwait) {
+		use_ep(ep, UDC_EP_SEL);
+		stat_flg = omap_readw(UDC_STAT_FLG);
+
+		if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN)
+				|| (ep->double_buf && HALF_FULL(stat_flg)))) {
+			struct omap_req	*req;
+
+			VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg);
+			req = container_of(ep->queue.next,
+					struct omap_req, queue);
+			(void) read_fifo(ep, req);
+			omap_writew(ep->bEndpointAddress, UDC_EP_NUM);
+			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+			ep->ackwait = 1 + ep->double_buf;
+		} else
+			deselect_ep();
+	}
+	mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+}
+
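+/* PIO data interrupt for non-ep0, non-iso endpoints; UDC_EPN_STAT reports
+ * which endpoint triggered (RX number in bits 8..11, TX number in bits 0..3).
+ */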
+static irqreturn_t omap_udc_pio_irq(int irq, void *_dev)
+{
+	u16		epn_stat, irq_src;
+	irqreturn_t	status = IRQ_NONE;
+	struct omap_ep	*ep;
+	int		epnum;
+	struct omap_udc	*udc = _dev;
+	struct omap_req	*req;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	epn_stat = omap_readw(UDC_EPN_STAT);
+	irq_src = omap_readw(UDC_IRQ_SRC);
+
+	/* handle OUT first, to avoid some wasteful NAKs */
+	if (irq_src & UDC_EPN_RX) {
+		epnum = (epn_stat >> 8) & 0x0f;
+		omap_writew(UDC_EPN_RX, UDC_IRQ_SRC);
+		status = IRQ_HANDLED;
+		ep = &udc->ep[epnum];
+		ep->irqs++;
+
+		omap_writew(epnum | UDC_EP_SEL, UDC_EP_NUM);
+		ep->fnf = 0;
+		if (omap_readw(UDC_STAT_FLG) & UDC_ACK) {
+			ep->ackwait--;
+			if (!list_empty(&ep->queue)) {
+				int stat;
+				req = container_of(ep->queue.next,
+						struct omap_req, queue);
+				stat = read_fifo(ep, req);
+				if (!ep->double_buf)
+					ep->fnf = 1;
+			}
+		}
+		/* min 6 clock delay before clearing EP_SEL ... */
+		epn_stat = omap_readw(UDC_EPN_STAT);
+		epn_stat = omap_readw(UDC_EPN_STAT);
+		omap_writew(epnum, UDC_EP_NUM);
+
+		/* enabling fifo _after_ clearing ACK, contrary to docs,
+		 * reduces lossage; timer still needed though (sigh).
+		 */
+		if (ep->fnf) {
+			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+			ep->ackwait = 1 + ep->double_buf;
+		}
+		mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
+	}
+
+	/* then IN transfers */
+	else if (irq_src & UDC_EPN_TX) {
+		epnum = epn_stat & 0x0f;
+		omap_writew(UDC_EPN_TX, UDC_IRQ_SRC);
+		status = IRQ_HANDLED;
+		ep = &udc->ep[16 + epnum];
+		ep->irqs++;
+
+		omap_writew(epnum | UDC_EP_DIR | UDC_EP_SEL, UDC_EP_NUM);
+		if (omap_readw(UDC_STAT_FLG) & UDC_ACK) {
+			ep->ackwait = 0;
+			if (!list_empty(&ep->queue)) {
+				req = container_of(ep->queue.next,
+						struct omap_req, queue);
+				(void) write_fifo(ep, req);
+			}
+		}
+		/* min 6 clock delay before clearing EP_SEL ... */
+		epn_stat = omap_readw(UDC_EPN_STAT);
+		epn_stat = omap_readw(UDC_EPN_STAT);
+		omap_writew(epnum | UDC_EP_DIR, UDC_EP_NUM);
+		/* then 6 clocks before it'd tx */
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return status;
+}
+
+#ifdef	USE_ISO
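+/* SOF-paced servicing of PIO iso endpoints:  move at most one packet per
+ * endpoint each frame, and mask the SOF interrupt once no requests remain.
+ */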
+static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
+{
+	struct omap_udc	*udc = _dev;
+	struct omap_ep	*ep;
+	int		pending = 0;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* handle all non-DMA ISO transfers */
+	list_for_each_entry (ep, &udc->iso, iso) {
+		u16		stat;
+		struct omap_req	*req;
+
+		if (ep->has_dma || list_empty(&ep->queue))
+			continue;
+		req = list_entry(ep->queue.next, struct omap_req, queue);
+
+		use_ep(ep, UDC_EP_SEL);
+		stat = omap_readw(UDC_STAT_FLG);
+
+		/* NOTE: like the other controller drivers, this isn't
+		 * currently reporting lost or damaged frames.
+		 */
+		if (ep->bEndpointAddress & USB_DIR_IN) {
+			if (stat & UDC_MISS_IN)
+				/* done(ep, req, -EPROTO) */;
+			else
+				write_fifo(ep, req);
+		} else {
+			int	status = 0;
+
+			if (stat & UDC_NO_RXPACKET)
+				status = -EREMOTEIO;
+			else if (stat & UDC_ISO_ERR)
+				status = -EILSEQ;
+			else if (stat & UDC_DATA_FLUSH)
+				status = -ENOSR;
+
+			if (status)
+				/* done(ep, req, status) */;
+			else
+				read_fifo(ep, req);
+		}
+		deselect_ep();
+		/* 6 wait states before next EP */
+
+		ep->irqs++;
+		if (!list_empty(&ep->queue))
+			pending = 1;
+	}
+	if (!pending) {
+		u16 w;
+
+		w = omap_readw(UDC_IRQ_EN);
+		w &= ~UDC_SOF_IE;
+		omap_writew(w, UDC_IRQ_EN);
+	}
+	omap_writew(UDC_IRQ_SOF, UDC_IRQ_SRC);
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return IRQ_HANDLED;
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static inline int machine_without_vbus_sense(void)
+{
+	return (machine_is_omap_innovator()
+		|| machine_is_omap_osk()
+		|| machine_is_omap_apollon()
+#ifndef CONFIG_MACH_OMAP_H4_OTG
+		|| machine_is_omap_h4()
+#endif
+		|| machine_is_sx1()
+		|| cpu_is_omap7xx() /* No known omap7xx boards with vbus sense */
+		);
+}
+
+static int omap_udc_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	int		status = -ENODEV;
+	struct omap_ep	*ep;
+	unsigned long	flags;
+
+	/* basic sanity tests */
+	if (!udc)
+		return -ENODEV;
+	if (!driver
+			// FIXME if otg, check:  driver->is_otg
+			|| driver->max_speed < USB_SPEED_FULL
+			|| !bind || !driver->setup)
+		return -EINVAL;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	if (udc->driver) {
+		spin_unlock_irqrestore(&udc->lock, flags);
+		return -EBUSY;
+	}
+
+	/* reset state */
+	list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
+		ep->irqs = 0;
+		if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
+			continue;
+		use_ep(ep, 0);
+		omap_writew(UDC_SET_HALT, UDC_CTRL);
+	}
+	udc->ep0_pending = 0;
+	udc->ep[0].irqs = 0;
+	udc->softconnect = 1;
+
+	/* hook up the driver */
+	driver->driver.bus = NULL;
+	udc->driver = driver;
+	udc->gadget.dev.driver = &driver->driver;
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	if (udc->dc_clk != NULL)
+		omap_udc_enable_clock(1);
+
+	status = bind(&udc->gadget);
+	if (status) {
+		DBG("bind to %s --> %d\n", driver->driver.name, status);
+		udc->gadget.dev.driver = NULL;
+		udc->driver = NULL;
+		goto done;
+	}
+	DBG("bound to driver %s\n", driver->driver.name);
+
+	omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC);
+
+	/* connect to bus through transceiver */
+	if (udc->transceiver) {
+		status = otg_set_peripheral(udc->transceiver->otg,
+						&udc->gadget);
+		if (status < 0) {
+			ERR("can't bind to transceiver\n");
+			if (driver->unbind) {
+				driver->unbind (&udc->gadget);
+				udc->gadget.dev.driver = NULL;
+				udc->driver = NULL;
+			}
+			goto done;
+		}
+	} else {
+		if (can_pullup(udc))
+			pullup_enable (udc);
+		else
+			pullup_disable (udc);
+	}
+
+	/* Boards without VBUS sensing can't autogate the 48 MHz clock, so
+	 * they can't enter deep sleep while a gadget driver is active.
+	 */
+	if (machine_without_vbus_sense())
+		omap_vbus_session(&udc->gadget, 1);
+
+done:
+	if (udc->dc_clk != NULL)
+		omap_udc_enable_clock(0);
+	return status;
+}
+
+static int omap_udc_stop(struct usb_gadget_driver *driver)
+{
+	unsigned long	flags;
+	int		status = -ENODEV;
+
+	if (!udc)
+		return -ENODEV;
+	if (!driver || driver != udc->driver || !driver->unbind)
+		return -EINVAL;
+
+	if (udc->dc_clk != NULL)
+		omap_udc_enable_clock(1);
+
+	if (machine_without_vbus_sense())
+		omap_vbus_session(&udc->gadget, 0);
+
+	if (udc->transceiver)
+		(void) otg_set_peripheral(udc->transceiver->otg, NULL);
+	else
+		pullup_disable(udc);
+
+	spin_lock_irqsave(&udc->lock, flags);
+	udc_quiesce(udc);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	driver->unbind(&udc->gadget);
+	udc->gadget.dev.driver = NULL;
+	udc->driver = NULL;
+
+	if (udc->dc_clk != NULL)
+		omap_udc_enable_clock(0);
+	DBG("unregistered driver '%s'\n", driver->driver.name);
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+#include <linux/seq_file.h>
+
+static const char proc_filename[] = "driver/udc";
+
+#define FOURBITS "%s%s%s%s"
+#define EIGHTBITS FOURBITS FOURBITS
+
+static void proc_ep_show(struct seq_file *s, struct omap_ep *ep)
+{
+	u16		stat_flg;
+	struct omap_req	*req;
+	char		buf[20];
+
+	use_ep(ep, 0);
+
+	if (use_dma && ep->has_dma)
+		snprintf(buf, sizeof buf, "(%cxdma%d lch%d) ",
+			(ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
+			ep->dma_channel - 1, ep->lch);
+	else
+		buf[0] = 0;
+
+	stat_flg = omap_readw(UDC_STAT_FLG);
+	seq_printf(s,
+		"\n%s %s%s%sirqs %ld stat %04x " EIGHTBITS FOURBITS "%s\n",
+		ep->name, buf,
+		ep->double_buf ? "dbuf " : "",
+		({char *s; switch(ep->ackwait){
+		case 0: s = ""; break;
+		case 1: s = "(ackw) "; break;
+		case 2: s = "(ackw2) "; break;
+		default: s = "(?) "; break;
+		} s;}),
+		ep->irqs, stat_flg,
+		(stat_flg & UDC_NO_RXPACKET) ? "no_rxpacket " : "",
+		(stat_flg & UDC_MISS_IN) ? "miss_in " : "",
+		(stat_flg & UDC_DATA_FLUSH) ? "data_flush " : "",
+		(stat_flg & UDC_ISO_ERR) ? "iso_err " : "",
+		(stat_flg & UDC_ISO_FIFO_EMPTY) ? "iso_fifo_empty " : "",
+		(stat_flg & UDC_ISO_FIFO_FULL) ? "iso_fifo_full " : "",
+		(stat_flg & UDC_EP_HALTED) ? "HALT " : "",
+		(stat_flg & UDC_STALL) ? "STALL " : "",
+		(stat_flg & UDC_NAK) ? "NAK " : "",
+		(stat_flg & UDC_ACK) ? "ACK " : "",
+		(stat_flg & UDC_FIFO_EN) ? "fifo_en " : "",
+		(stat_flg & UDC_NON_ISO_FIFO_EMPTY) ? "fifo_empty " : "",
+		(stat_flg & UDC_NON_ISO_FIFO_FULL) ? "fifo_full " : "");
+
+	if (list_empty (&ep->queue))
+		seq_printf(s, "\t(queue empty)\n");
+	else
+		list_for_each_entry (req, &ep->queue, queue) {
+			unsigned	length = req->req.actual;
+
+			if (use_dma && buf[0]) {
+				length += ((ep->bEndpointAddress & USB_DIR_IN)
+						? dma_src_len : dma_dest_len)
+					(ep, req->req.dma + length);
+				buf[0] = 0;
+			}
+			seq_printf(s, "\treq %p len %d/%d buf %p\n",
+					&req->req, length,
+					req->req.length, req->req.buf);
+		}
+}
+
+static char *trx_mode(unsigned m, int enabled)
+{
+	switch (m) {
+	case 0:		return enabled ? "*6wire" : "unused";
+	case 1:		return "4wire";
+	case 2:		return "3wire";
+	case 3:		return "6wire";
+	default:	return "unknown";
+	}
+}
+
+static int proc_otg_show(struct seq_file *s)
+{
+	u32		tmp;
+	u32		trans = 0;
+	char		*ctrl_name = "(UNKNOWN)";
+
+	/* XXX This needs major revision for OMAP2+ */
+	tmp = omap_readl(OTG_REV);
+	if (cpu_class_is_omap1()) {
+		ctrl_name = "tranceiver_ctrl";
+		trans = omap_readw(USB_TRANSCEIVER_CTRL);
+	}
+	seq_printf(s, "\nOTG rev %d.%d, %s %05x\n",
+		tmp >> 4, tmp & 0xf, ctrl_name, trans);
+	tmp = omap_readw(OTG_SYSCON_1);
+	seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s,"
+			FOURBITS "\n", tmp,
+		trx_mode(USB2_TRX_MODE(tmp), trans & CONF_USB2_UNI_R),
+		trx_mode(USB1_TRX_MODE(tmp), trans & CONF_USB1_UNI_R),
+		(USB0_TRX_MODE(tmp) == 0 && !cpu_is_omap1710())
+			? "internal"
+			: trx_mode(USB0_TRX_MODE(tmp), 1),
+		(tmp & OTG_IDLE_EN) ? " !otg" : "",
+		(tmp & HST_IDLE_EN) ? " !host" : "",
+		(tmp & DEV_IDLE_EN) ? " !dev" : "",
+		(tmp & OTG_RESET_DONE) ? " reset_done" : " reset_active");
+	tmp = omap_readl(OTG_SYSCON_2);
+	seq_printf(s, "otg_syscon2 %08x%s" EIGHTBITS
+			" b_ase_brst=%d hmc=%d\n", tmp,
+		(tmp & OTG_EN) ? " otg_en" : "",
+		(tmp & USBX_SYNCHRO) ? " synchro" : "",
+		// much more SRP stuff
+		(tmp & SRP_DATA) ? " srp_data" : "",
+		(tmp & SRP_VBUS) ? " srp_vbus" : "",
+		(tmp & OTG_PADEN) ? " otg_paden" : "",
+		(tmp & HMC_PADEN) ? " hmc_paden" : "",
+		(tmp & UHOST_EN) ? " uhost_en" : "",
+		(tmp & HMC_TLLSPEED) ? " tllspeed" : "",
+		(tmp & HMC_TLLATTACH) ? " tllattach" : "",
+		B_ASE_BRST(tmp),
+		OTG_HMC(tmp));
+	tmp = omap_readl(OTG_CTRL);
+	seq_printf(s, "otg_ctrl    %06x" EIGHTBITS EIGHTBITS "%s\n", tmp,
+		(tmp & OTG_ASESSVLD) ? " asess" : "",
+		(tmp & OTG_BSESSEND) ? " bsess_end" : "",
+		(tmp & OTG_BSESSVLD) ? " bsess" : "",
+		(tmp & OTG_VBUSVLD) ? " vbus" : "",
+		(tmp & OTG_ID) ? " id" : "",
+		(tmp & OTG_DRIVER_SEL) ? " DEVICE" : " HOST",
+		(tmp & OTG_A_SETB_HNPEN) ? " a_setb_hnpen" : "",
+		(tmp & OTG_A_BUSREQ) ? " a_bus" : "",
+		(tmp & OTG_B_HNPEN) ? " b_hnpen" : "",
+		(tmp & OTG_B_BUSREQ) ? " b_bus" : "",
+		(tmp & OTG_BUSDROP) ? " busdrop" : "",
+		(tmp & OTG_PULLDOWN) ? " down" : "",
+		(tmp & OTG_PULLUP) ? " up" : "",
+		(tmp & OTG_DRV_VBUS) ? " drv" : "",
+		(tmp & OTG_PD_VBUS) ? " pd_vb" : "",
+		(tmp & OTG_PU_VBUS) ? " pu_vb" : "",
+		(tmp & OTG_PU_ID) ? " pu_id" : ""
+		);
+	tmp = omap_readw(OTG_IRQ_EN);
+	seq_printf(s, "otg_irq_en  %04x" "\n", tmp);
+	tmp = omap_readw(OTG_IRQ_SRC);
+	seq_printf(s, "otg_irq_src %04x" "\n", tmp);
+	tmp = omap_readw(OTG_OUTCTRL);
+	seq_printf(s, "otg_outctrl %04x" "\n", tmp);
+	tmp = omap_readw(OTG_TEST);
+	seq_printf(s, "otg_test    %04x" "\n", tmp);
+	return 0;
+}
+
+static int proc_udc_show(struct seq_file *s, void *_)
+{
+	u32		tmp;
+	struct omap_ep	*ep;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	seq_printf(s, "%s, version: " DRIVER_VERSION
+#ifdef	USE_ISO
+		" (iso)"
+#endif
+		"%s\n",
+		driver_desc,
+		use_dma ?  " (dma)" : "");
+
+	tmp = omap_readw(UDC_REV) & 0xff;
+	seq_printf(s,
+		"UDC rev %d.%d, fifo mode %d, gadget %s\n"
+		"hmc %d, transceiver %s\n",
+		tmp >> 4, tmp & 0xf,
+		fifo_mode,
+		udc->driver ? udc->driver->driver.name : "(none)",
+		HMC,
+		udc->transceiver
+			? udc->transceiver->label
+			: ((cpu_is_omap1710() || cpu_is_omap24xx())
+				? "external" : "(none)"));
+	if (cpu_class_is_omap1()) {
+		seq_printf(s, "ULPD control %04x req %04x status %04x\n",
+			omap_readw(ULPD_CLOCK_CTRL),
+			omap_readw(ULPD_SOFT_REQ),
+			omap_readw(ULPD_STATUS_REQ));
+	}
+
+	/* OTG controller registers */
+	if (!cpu_is_omap15xx())
+		proc_otg_show(s);
+
+	tmp = omap_readw(UDC_SYSCON1);
+	seq_printf(s, "\nsyscon1     %04x" EIGHTBITS "\n", tmp,
+		(tmp & UDC_CFG_LOCK) ? " cfg_lock" : "",
+		(tmp & UDC_DATA_ENDIAN) ? " data_endian" : "",
+		(tmp & UDC_DMA_ENDIAN) ? " dma_endian" : "",
+		(tmp & UDC_NAK_EN) ? " nak" : "",
+		(tmp & UDC_AUTODECODE_DIS) ? " autodecode_dis" : "",
+		(tmp & UDC_SELF_PWR) ? " self_pwr" : "",
+		(tmp & UDC_SOFF_DIS) ? " soff_dis" : "",
+		(tmp & UDC_PULLUP_EN) ? " PULLUP" : "");
+	// syscon2 is write-only
+
+	/* UDC controller registers */
+	if (!(tmp & UDC_PULLUP_EN)) {
+		seq_printf(s, "(suspended)\n");
+		spin_unlock_irqrestore(&udc->lock, flags);
+		return 0;
+	}
+
+	tmp = omap_readw(UDC_DEVSTAT);
+	seq_printf(s, "devstat     %04x" EIGHTBITS "%s%s\n", tmp,
+		(tmp & UDC_B_HNP_ENABLE) ? " b_hnp" : "",
+		(tmp & UDC_A_HNP_SUPPORT) ? " a_hnp" : "",
+		(tmp & UDC_A_ALT_HNP_SUPPORT) ? " a_alt_hnp" : "",
+		(tmp & UDC_R_WK_OK) ? " r_wk_ok" : "",
+		(tmp & UDC_USB_RESET) ? " usb_reset" : "",
+		(tmp & UDC_SUS) ? " SUS" : "",
+		(tmp & UDC_CFG) ? " CFG" : "",
+		(tmp & UDC_ADD) ? " ADD" : "",
+		(tmp & UDC_DEF) ? " DEF" : "",
+		(tmp & UDC_ATT) ? " ATT" : "");
+	seq_printf(s, "sof         %04x\n", omap_readw(UDC_SOF));
+	tmp = omap_readw(UDC_IRQ_EN);
+	seq_printf(s, "irq_en      %04x" FOURBITS "%s\n", tmp,
+		(tmp & UDC_SOF_IE) ? " sof" : "",
+		(tmp & UDC_EPN_RX_IE) ? " epn_rx" : "",
+		(tmp & UDC_EPN_TX_IE) ? " epn_tx" : "",
+		(tmp & UDC_DS_CHG_IE) ? " ds_chg" : "",
+		(tmp & UDC_EP0_IE) ? " ep0" : "");
+	tmp = omap_readw(UDC_IRQ_SRC);
+	seq_printf(s, "irq_src     %04x" EIGHTBITS "%s%s\n", tmp,
+		(tmp & UDC_TXN_DONE) ? " txn_done" : "",
+		(tmp & UDC_RXN_CNT) ? " rxn_cnt" : "",
+		(tmp & UDC_RXN_EOT) ? " rxn_eot" : "",
+		(tmp & UDC_IRQ_SOF) ? " sof" : "",
+		(tmp & UDC_EPN_RX) ? " epn_rx" : "",
+		(tmp & UDC_EPN_TX) ? " epn_tx" : "",
+		(tmp & UDC_DS_CHG) ? " ds_chg" : "",
+		(tmp & UDC_SETUP) ? " setup" : "",
+		(tmp & UDC_EP0_RX) ? " ep0out" : "",
+		(tmp & UDC_EP0_TX) ? " ep0in" : "");
+	if (use_dma) {
+		unsigned i;
+
+		tmp = omap_readw(UDC_DMA_IRQ_EN);
+		seq_printf(s, "dma_irq_en  %04x%s" EIGHTBITS "\n", tmp,
+			(tmp & UDC_TX_DONE_IE(3)) ? " tx2_done" : "",
+			(tmp & UDC_RX_CNT_IE(3)) ? " rx2_cnt" : "",
+			(tmp & UDC_RX_EOT_IE(3)) ? " rx2_eot" : "",
+
+			(tmp & UDC_TX_DONE_IE(2)) ? " tx1_done" : "",
+			(tmp & UDC_RX_CNT_IE(2)) ? " rx1_cnt" : "",
+			(tmp & UDC_RX_EOT_IE(2)) ? " rx1_eot" : "",
+
+			(tmp & UDC_TX_DONE_IE(1)) ? " tx0_done" : "",
+			(tmp & UDC_RX_CNT_IE(1)) ? " rx0_cnt" : "",
+			(tmp & UDC_RX_EOT_IE(1)) ? " rx0_eot" : "");
+
+		tmp = omap_readw(UDC_RXDMA_CFG);
+		seq_printf(s, "rxdma_cfg   %04x\n", tmp);
+		if (tmp) {
+			for (i = 0; i < 3; i++) {
+				if ((tmp & (0x0f << (i * 4))) == 0)
+					continue;
+				seq_printf(s, "rxdma[%d]    %04x\n", i,
+						omap_readw(UDC_RXDMA(i + 1)));
+			}
+		}
+		tmp = omap_readw(UDC_TXDMA_CFG);
+		seq_printf(s, "txdma_cfg   %04x\n", tmp);
+		if (tmp) {
+			for (i = 0; i < 3; i++) {
+				if (!(tmp & (0x0f << (i * 4))))
+					continue;
+				seq_printf(s, "txdma[%d]    %04x\n", i,
+						omap_readw(UDC_TXDMA(i + 1)));
+			}
+		}
+	}
+
+	tmp = omap_readw(UDC_DEVSTAT);
+	if (tmp & UDC_ATT) {
+		proc_ep_show(s, &udc->ep[0]);
+		if (tmp & UDC_ADD) {
+			list_for_each_entry (ep, &udc->gadget.ep_list,
+					ep.ep_list) {
+				if (ep->desc)
+					proc_ep_show(s, ep);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+static int proc_udc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_udc_show, NULL);
+}
+
+static const struct file_operations proc_ops = {
+	.owner		= THIS_MODULE,
+	.open		= proc_udc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void create_proc_file(void)
+{
+	proc_create(proc_filename, 0, NULL, &proc_ops);
+}
+
+static void remove_proc_file(void)
+{
+	remove_proc_entry(proc_filename, NULL);
+}
+
+#else
+
+static inline void create_proc_file(void) {}
+static inline void remove_proc_file(void) {}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* Before this controller can enumerate, we need to pick an endpoint
+ * configuration, or "fifo_mode".  That involves allocating 2KB of packet
+ * buffer space among the endpoints we'll be operating.
+ *
+ * NOTE: as of OMAP 1710 ES2.0, writing a new endpoint config when
+ * UDC_SYSCON_1.CFG_LOCK is set can now work.  We won't use that
+ * capability yet though.
+ */
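+/*
+ * omap_ep_setup() returns the next free buffer offset, so successive calls
+ * pack endpoint FIFOs back to back; a double-buffered endpoint consumes
+ * 2 * maxpacket bytes.
+ */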
+static unsigned __init
+omap_ep_setup(char *name, u8 addr, u8 type,
+		unsigned buf, unsigned maxp, int dbuf)
+{
+	struct omap_ep	*ep;
+	u16		epn_rxtx = 0;
+
+	/* OUT endpoints first, then IN */
+	ep = &udc->ep[addr & 0xf];
+	if (addr & USB_DIR_IN)
+		ep += 16;
+
+	/* in case of ep init table bugs */
+	BUG_ON(ep->name[0]);
+
+	/* chip setup ... bit values are same for IN, OUT */
+	if (type == USB_ENDPOINT_XFER_ISOC) {
+		switch (maxp) {
+		case 8:		epn_rxtx = 0 << 12; break;
+		case 16:	epn_rxtx = 1 << 12; break;
+		case 32:	epn_rxtx = 2 << 12; break;
+		case 64:	epn_rxtx = 3 << 12; break;
+		case 128:	epn_rxtx = 4 << 12; break;
+		case 256:	epn_rxtx = 5 << 12; break;
+		case 512:	epn_rxtx = 6 << 12; break;
+		default:	BUG();
+		}
+		epn_rxtx |= UDC_EPN_RX_ISO;
+		dbuf = 1;
+	} else {
+		/* double-buffering "not supported" on 15xx,
+		 * and ignored for PIO-IN on newer chips
+		 * (for more reliable behavior)
+		 */
+		if (!use_dma || cpu_is_omap15xx() || cpu_is_omap24xx())
+			dbuf = 0;
+
+		switch (maxp) {
+		case 8:		epn_rxtx = 0 << 12; break;
+		case 16:	epn_rxtx = 1 << 12; break;
+		case 32:	epn_rxtx = 2 << 12; break;
+		case 64:	epn_rxtx = 3 << 12; break;
+		default:	BUG();
+		}
+		if (dbuf && addr)
+			epn_rxtx |= UDC_EPN_RX_DB;
+		init_timer(&ep->timer);
+		ep->timer.function = pio_out_timer;
+		ep->timer.data = (unsigned long) ep;
+	}
+	if (addr)
+		epn_rxtx |= UDC_EPN_RX_VALID;
+	BUG_ON(buf & 0x07);
+	epn_rxtx |= buf >> 3;
+
+	DBG("%s addr %02x rxtx %04x maxp %d%s buf %d\n",
+		name, addr, epn_rxtx, maxp, dbuf ? "x2" : "", buf);
+
+	if (addr & USB_DIR_IN)
+		omap_writew(epn_rxtx, UDC_EP_TX(addr & 0xf));
+	else
+		omap_writew(epn_rxtx, UDC_EP_RX(addr));
+
+	/* next endpoint's buffer starts after this one's */
+	buf += maxp;
+	if (dbuf)
+		buf += maxp;
+	BUG_ON(buf > 2048);
+
+	/* set up driver data structures */
+	BUG_ON(strlen(name) >= sizeof ep->name);
+	strlcpy(ep->name, name, sizeof ep->name);
+	INIT_LIST_HEAD(&ep->queue);
+	INIT_LIST_HEAD(&ep->iso);
+	ep->bEndpointAddress = addr;
+	ep->bmAttributes = type;
+	ep->double_buf = dbuf;
+	ep->udc = udc;
+
+	ep->ep.name = ep->name;
+	ep->ep.ops = &omap_ep_ops;
+	ep->ep.maxpacket = ep->maxpacket = maxp;
+	list_add_tail (&ep->ep.ep_list, &udc->gadget.ep_list);
+
+	return buf;
+}
+
+static void omap_udc_release(struct device *dev)
+{
+	complete(udc->done);
+	kfree (udc);
+	udc = NULL;
+}
+
+static int __init
+omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv)
+{
+	unsigned	tmp, buf;
+
+	/* abolish any previous hardware state */
+	omap_writew(0, UDC_SYSCON1);
+	omap_writew(0, UDC_IRQ_EN);
+	omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC);
+	omap_writew(0, UDC_DMA_IRQ_EN);
+	omap_writew(0, UDC_RXDMA_CFG);
+	omap_writew(0, UDC_TXDMA_CFG);
+
+	/* UDC_PULLUP_EN gates the chip clock */
+	// OTG_SYSCON_1 |= DEV_IDLE_EN;
+
+	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
+	if (!udc)
+		return -ENOMEM;
+
+	spin_lock_init (&udc->lock);
+
+	udc->gadget.ops = &omap_gadget_ops;
+	udc->gadget.ep0 = &udc->ep[0].ep;
+	INIT_LIST_HEAD(&udc->gadget.ep_list);
+	INIT_LIST_HEAD(&udc->iso);
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	udc->gadget.max_speed = USB_SPEED_FULL;
+	udc->gadget.name = driver_name;
+
+	device_initialize(&udc->gadget.dev);
+	dev_set_name(&udc->gadget.dev, "gadget");
+	udc->gadget.dev.release = omap_udc_release;
+	udc->gadget.dev.parent = &odev->dev;
+	if (use_dma)
+		udc->gadget.dev.dma_mask = odev->dev.dma_mask;
+
+	udc->transceiver = xceiv;
+
+	/* ep0 is special; put it right after the SETUP buffer */
+	buf = omap_ep_setup("ep0", 0, USB_ENDPOINT_XFER_CONTROL,
+			8 /* after SETUP */, 64 /* maxpacket */, 0);
+	list_del_init(&udc->ep[0].ep.ep_list);
+
+	/* initially disable all non-ep0 endpoints */
+	for (tmp = 1; tmp < 15; tmp++) {
+		omap_writew(0, UDC_EP_RX(tmp));
+		omap_writew(0, UDC_EP_TX(tmp));
+	}
+
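+/* Shorthand for the fifo_mode tables below; each use advances "buf",
+ * claiming that endpoint's share of the 2KB packet buffer RAM.
+ */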
+#define OMAP_BULK_EP(name,addr) \
+	buf = omap_ep_setup(name "-bulk", addr, \
+			USB_ENDPOINT_XFER_BULK, buf, 64, 1);
+#define OMAP_INT_EP(name,addr, maxp) \
+	buf = omap_ep_setup(name "-int", addr, \
+			USB_ENDPOINT_XFER_INT, buf, maxp, 0);
+#define OMAP_ISO_EP(name,addr, maxp) \
+	buf = omap_ep_setup(name "-iso", addr, \
+			USB_ENDPOINT_XFER_ISOC, buf, maxp, 1);
+
+	switch (fifo_mode) {
+	case 0:
+		OMAP_BULK_EP("ep1in",  USB_DIR_IN  | 1);
+		OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
+		OMAP_INT_EP("ep3in",   USB_DIR_IN  | 3, 16);
+		break;
+	case 1:
+		OMAP_BULK_EP("ep1in",  USB_DIR_IN  | 1);
+		OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
+		OMAP_INT_EP("ep9in",   USB_DIR_IN  | 9, 16);
+
+		OMAP_BULK_EP("ep3in",  USB_DIR_IN  | 3);
+		OMAP_BULK_EP("ep4out", USB_DIR_OUT | 4);
+		OMAP_INT_EP("ep10in",  USB_DIR_IN  | 10, 16);
+
+		OMAP_BULK_EP("ep5in",  USB_DIR_IN  | 5);
+		OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5);
+		OMAP_INT_EP("ep11in",  USB_DIR_IN  | 11, 16);
+
+		OMAP_BULK_EP("ep6in",  USB_DIR_IN  | 6);
+		OMAP_BULK_EP("ep6out", USB_DIR_OUT | 6);
+		OMAP_INT_EP("ep12in",  USB_DIR_IN  | 12, 16);
+
+		OMAP_BULK_EP("ep7in",  USB_DIR_IN  | 7);
+		OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7);
+		OMAP_INT_EP("ep13in",  USB_DIR_IN  | 13, 16);
+		OMAP_INT_EP("ep13out", USB_DIR_OUT | 13, 16);
+
+		OMAP_BULK_EP("ep8in",  USB_DIR_IN  | 8);
+		OMAP_BULK_EP("ep8out", USB_DIR_OUT | 8);
+		OMAP_INT_EP("ep14in",  USB_DIR_IN  | 14, 16);
+		OMAP_INT_EP("ep14out", USB_DIR_OUT | 14, 16);
+
+		OMAP_BULK_EP("ep15in",  USB_DIR_IN  | 15);
+		OMAP_BULK_EP("ep15out", USB_DIR_OUT | 15);
+
+		break;
+
+#ifdef	USE_ISO
+	case 2:			/* mixed iso/bulk */
+		OMAP_ISO_EP("ep1in",   USB_DIR_IN  | 1, 256);
+		OMAP_ISO_EP("ep2out",  USB_DIR_OUT | 2, 256);
+		OMAP_ISO_EP("ep3in",   USB_DIR_IN  | 3, 128);
+		OMAP_ISO_EP("ep4out",  USB_DIR_OUT | 4, 128);
+
+		OMAP_INT_EP("ep5in",   USB_DIR_IN  | 5, 16);
+
+		OMAP_BULK_EP("ep6in",  USB_DIR_IN  | 6);
+		OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7);
+		OMAP_INT_EP("ep8in",   USB_DIR_IN  | 8, 16);
+		break;
+	case 3:			/* mixed bulk/iso */
+		OMAP_BULK_EP("ep1in",  USB_DIR_IN  | 1);
+		OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
+		OMAP_INT_EP("ep3in",   USB_DIR_IN  | 3, 16);
+
+		OMAP_BULK_EP("ep4in",  USB_DIR_IN  | 4);
+		OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5);
+		OMAP_INT_EP("ep6in",   USB_DIR_IN  | 6, 16);
+
+		OMAP_ISO_EP("ep7in",   USB_DIR_IN  | 7, 256);
+		OMAP_ISO_EP("ep8out",  USB_DIR_OUT | 8, 256);
+		OMAP_INT_EP("ep9in",   USB_DIR_IN  | 9, 16);
+		break;
+#endif
+
+	/* add more modes as needed */
+
+	default:
+		ERR("unsupported fifo_mode #%d\n", fifo_mode);
+		return -ENODEV;
+	}
+	omap_writew(UDC_CFG_LOCK|UDC_SELF_PWR, UDC_SYSCON1);
+	INFO("fifo mode %d, %d bytes not used\n", fifo_mode, 2048 - buf);
+	return 0;
+}
+
+static int __init omap_udc_probe(struct platform_device *pdev)
+{
+	int			status = -ENODEV;
+	int			hmc;
+	struct usb_phy		*xceiv = NULL;
+	const char		*type = NULL;
+	struct omap_usb_config	*config = pdev->dev.platform_data;
+	struct clk		*dc_clk;
+	struct clk		*hhc_clk;
+
+	/* NOTE:  "knows" the order of the resources! */
+	if (!request_mem_region(pdev->resource[0].start,
+			pdev->resource[0].end - pdev->resource[0].start + 1,
+			driver_name)) {
+		DBG("request_mem_region failed\n");
+		return -EBUSY;
+	}
+
+	if (cpu_is_omap16xx()) {
+		dc_clk = clk_get(&pdev->dev, "usb_dc_ck");
+		hhc_clk = clk_get(&pdev->dev, "usb_hhc_ck");
+		BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
+		/* can't use omap_udc_enable_clock yet */
+		clk_enable(dc_clk);
+		clk_enable(hhc_clk);
+		udelay(100);
+	}
+
+	if (cpu_is_omap24xx()) {
+		dc_clk = clk_get(&pdev->dev, "usb_fck");
+		hhc_clk = clk_get(&pdev->dev, "usb_l4_ick");
+		BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
+		/* can't use omap_udc_enable_clock yet */
+		clk_enable(dc_clk);
+		clk_enable(hhc_clk);
+		udelay(100);
+	}
+
+	if (cpu_is_omap7xx()) {
+		dc_clk = clk_get(&pdev->dev, "usb_dc_ck");
+		hhc_clk = clk_get(&pdev->dev, "l3_ocpi_ck");
+		BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
+		/* can't use omap_udc_enable_clock yet */
+		clk_enable(dc_clk);
+		clk_enable(hhc_clk);
+		udelay(100);
+	}
+
+	INFO("OMAP UDC rev %d.%d%s\n",
+		omap_readw(UDC_REV) >> 4, omap_readw(UDC_REV) & 0xf,
+		config->otg ? ", Mini-AB" : "");
+
+	/* use the mode given to us by board init code */
+	if (cpu_is_omap15xx()) {
+		hmc = HMC_1510;
+		type = "(unknown)";
+
+		if (machine_without_vbus_sense()) {
+			/* just set up software VBUS detect, and then
+			 * later rig it so we always report VBUS.
+			 * FIXME without really sensing VBUS, we can't
+			 * know when to turn PULLUP_EN on/off; and that
+			 * means we always "need" the 48MHz clock.
+			 */
+			u32 tmp = omap_readl(FUNC_MUX_CTRL_0);
+			tmp &= ~VBUS_CTRL_1510;
+			omap_writel(tmp, FUNC_MUX_CTRL_0);
+			tmp |= VBUS_MODE_1510;
+			tmp &= ~VBUS_CTRL_1510;
+			omap_writel(tmp, FUNC_MUX_CTRL_0);
+		}
+	} else {
+		/* The transceiver may package some GPIO logic or handle
+		 * loopback and/or transceiverless setup; if we find one,
+		 * use it.  Except for OTG, we don't _need_ to talk to one;
+		 * but not having one probably means no VBUS detection.
+		 */
+		xceiv = usb_get_transceiver();
+		if (xceiv)
+			type = xceiv->label;
+		else if (config->otg) {
+			DBG("OTG requires external transceiver!\n");
+			goto cleanup0;
+		}
+
+		hmc = HMC_1610;
+
+		if (cpu_is_omap24xx()) {
+			/* this could be transceiverless in one of the
+			 * "we don't need to know" modes.
+			 */
+			type = "external";
+			goto known;
+		}
+
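+		/* Map the HMC mode (from OTG_SYSCON_2) onto whatever
+		 * transceiver wiring the board init code selected.
+		 */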
+		switch (hmc) {
+		case 0:			/* POWERUP DEFAULT == 0 */
+		case 4:
+		case 12:
+		case 20:
+			if (!cpu_is_omap1710()) {
+				type = "integrated";
+				break;
+			}
+			/* FALL THROUGH */
+		case 3:
+		case 11:
+		case 16:
+		case 19:
+		case 25:
+			if (!xceiv) {
+				DBG("external transceiver not registered!\n");
+				type = "unknown";
+			}
+			break;
+		case 21:			/* internal loopback */
+			type = "loopback";
+			break;
+		case 14:			/* transceiverless */
+			if (cpu_is_omap1710())
+				goto bad_on_1710;
+			/* FALL THROUGH */
+		case 13:
+		case 15:
+			type = "no";
+			break;
+
+		default:
+bad_on_1710:
+			ERR("unrecognized UDC HMC mode %d\n", hmc);
+			goto cleanup0;
+		}
+	}
+known:
+	INFO("hmc mode %d, %s transceiver\n", hmc, type);
+
+	/* a "gadget" abstracts/virtualizes the controller */
+	status = omap_udc_setup(pdev, xceiv);
+	if (status) {
+		goto cleanup0;
+	}
+	xceiv = NULL;
+	// "udc" is now valid
+	pullup_disable(udc);
+#if	defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+	udc->gadget.is_otg = (config->otg != 0);
+#endif
+
+	/* starting with omap1710 es2.0, clear toggle is a separate bit */
+	if (omap_readw(UDC_REV) >= 0x61)
+		udc->clr_halt = UDC_RESET_EP | UDC_CLRDATA_TOGGLE;
+	else
+		udc->clr_halt = UDC_RESET_EP;
+
+	/* USB general purpose IRQ:  ep0, state changes, dma, etc */
+	status = request_irq(pdev->resource[1].start, omap_udc_irq,
+			IRQF_SAMPLE_RANDOM, driver_name, udc);
+	if (status != 0) {
+		ERR("can't get irq %d, err %d\n",
+			(int) pdev->resource[1].start, status);
+		goto cleanup1;
+	}
+
+	/* USB "non-iso" IRQ (PIO for all but ep0) */
+	status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
+			IRQF_SAMPLE_RANDOM, "omap_udc pio", udc);
+	if (status != 0) {
+		ERR("can't get irq %d, err %d\n",
+			(int) pdev->resource[2].start, status);
+		goto cleanup2;
+	}
+#ifdef	USE_ISO
+	status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
+			0, "omap_udc iso", udc);
+	if (status != 0) {
+		ERR("can't get irq %d, err %d\n",
+			(int) pdev->resource[3].start, status);
+		goto cleanup3;
+	}
+#endif
+	if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
+		udc->dc_clk = dc_clk;
+		udc->hhc_clk = hhc_clk;
+		clk_disable(hhc_clk);
+		clk_disable(dc_clk);
+	}
+
+	if (cpu_is_omap24xx()) {
+		udc->dc_clk = dc_clk;
+		udc->hhc_clk = hhc_clk;
+		/* FIXME: on OMAP2 we don't release the hhc & dc clocks */
+#if 0
+		clk_disable(hhc_clk);
+		clk_disable(dc_clk);
+#endif
+	}
+
+	create_proc_file();
+	status = device_add(&udc->gadget.dev);
+	if (status)
+		goto cleanup4;
+
+	status = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+	if (!status)
+		return status;
+	/* If fail, fall through */
+cleanup4:
+	remove_proc_file();
+
+#ifdef	USE_ISO
+cleanup3:
+	free_irq(pdev->resource[2].start, udc);
+#endif
+
+cleanup2:
+	free_irq(pdev->resource[1].start, udc);
+
+cleanup1:
+	kfree (udc);
+	udc = NULL;
+
+cleanup0:
+	if (xceiv)
+		usb_put_transceiver(xceiv);
+
+	if (cpu_is_omap16xx() || cpu_is_omap24xx() || cpu_is_omap7xx()) {
+		clk_disable(hhc_clk);
+		clk_disable(dc_clk);
+		clk_put(hhc_clk);
+		clk_put(dc_clk);
+	}
+
+	release_mem_region(pdev->resource[0].start,
+			pdev->resource[0].end - pdev->resource[0].start + 1);
+
+	return status;
+}
+
+static int __exit omap_udc_remove(struct platform_device *pdev)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	if (!udc)
+		return -ENODEV;
+
+	usb_del_gadget_udc(&udc->gadget);
+	if (udc->driver)
+		return -EBUSY;
+
+	udc->done = &done;
+
+	pullup_disable(udc);
+	if (udc->transceiver) {
+		usb_put_transceiver(udc->transceiver);
+		udc->transceiver = NULL;
+	}
+	omap_writew(0, UDC_SYSCON1);
+
+	remove_proc_file();
+
+#ifdef	USE_ISO
+	free_irq(pdev->resource[3].start, udc);
+#endif
+	free_irq(pdev->resource[2].start, udc);
+	free_irq(pdev->resource[1].start, udc);
+
+	if (udc->dc_clk) {
+		if (udc->clk_requested)
+			omap_udc_enable_clock(0);
+		clk_put(udc->hhc_clk);
+		clk_put(udc->dc_clk);
+	}
+
+	release_mem_region(pdev->resource[0].start,
+			pdev->resource[0].end - pdev->resource[0].start + 1);
+
+	device_unregister(&udc->gadget.dev);
+	wait_for_completion(&done);
+
+	return 0;
+}
+
+/* suspend/resume/wakeup from sysfs (echo > power/state) or when the
+ * system is forced into deep sleep
+ *
+ * REVISIT we should probably reject suspend requests when there's a host
+ * session active, rather than disconnecting, at least on boards that can
+ * report VBUS irqs (UDC_DEVSTAT.UDC_ATT).  And in any case, we need to
+ * make host resumes and VBUS detection trigger OMAP wakeup events; that
+ * may involve talking to an external transceiver (e.g. isp1301).
+ */
+
+static int omap_udc_suspend(struct platform_device *dev, pm_message_t message)
+{
+	u32	devstat;
+
+	devstat = omap_readw(UDC_DEVSTAT);
+
+	/* we're requesting 48 MHz clock if the pullup is enabled
+	 * (== we're attached to the host) and we're not suspended,
+	 * which would prevent entry to deep sleep...
+	 */
+	if ((devstat & UDC_ATT) != 0 && (devstat & UDC_SUS) == 0) {
+		WARNING("session active; suspend requires disconnect\n");
+		omap_pullup(&udc->gadget, 0);
+	}
+
+	return 0;
+}
+
+static int omap_udc_resume(struct platform_device *dev)
+{
+	DBG("resume + wakeup/SRP\n");
+	omap_pullup(&udc->gadget, 1);
+
+	/* maybe the host would enumerate us if we nudged it */
+	msleep(100);
+	return omap_wakeup(&udc->gadget);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct platform_driver udc_driver = {
+	.remove		= __exit_p(omap_udc_remove),
+	.suspend	= omap_udc_suspend,
+	.resume		= omap_udc_resume,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= (char *) driver_name,
+	},
+};
+
+static int __init udc_init(void)
+{
+	/* Disable DMA for omap7xx -- it doesn't work right. */
+	if (cpu_is_omap7xx())
+		use_dma = 0;
+
+	INFO("%s, version: " DRIVER_VERSION
+#ifdef	USE_ISO
+		" (iso)"
+#endif
+		"%s\n", driver_desc,
+		use_dma ?  " (dma)" : "");
+	return platform_driver_probe(&udc_driver, omap_udc_probe);
+}
+module_init(udc_init);
+
+static void __exit udc_exit(void)
+{
+	platform_driver_unregister(&udc_driver);
+}
+module_exit(udc_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:omap_udc");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/omap_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/omap_udc.h
new file mode 100644
index 0000000..59d3b22
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/omap_udc.h
@@ -0,0 +1,207 @@
+/*
+ * omap_udc.h -- for omap 3.2 udc, with OTG support
+ *
+ * 2004 (C) Texas Instruments, Inc.
+ * 2004 (C) David Brownell
+ */
+
+/*
+ * USB device/endpoint management registers
+ */
+
+#define	UDC_REV				(UDC_BASE + 0x0)	/* Revision */
+#define	UDC_EP_NUM			(UDC_BASE + 0x4)	/* Which endpoint */
+#	define	UDC_SETUP_SEL		(1 << 6)
+#	define	UDC_EP_SEL		(1 << 5)
+#	define	UDC_EP_DIR		(1 << 4)
+	/* low 4 bits for endpoint number */
+#define	UDC_DATA			(UDC_BASE + 0x08)	/* Endpoint FIFO */
+#define	UDC_CTRL			(UDC_BASE + 0x0C)	/* Endpoint control */
+#	define	UDC_CLR_HALT		(1 << 7)
+#	define	UDC_SET_HALT		(1 << 6)
+#	define	UDC_CLRDATA_TOGGLE	(1 << 3)
+#	define	UDC_SET_FIFO_EN		(1 << 2)
+#	define	UDC_CLR_EP		(1 << 1)
+#	define	UDC_RESET_EP		(1 << 0)
+#define	UDC_STAT_FLG			(UDC_BASE + 0x10)	/* Endpoint status */
+#	define	UDC_NO_RXPACKET		(1 << 15)
+#	define	UDC_MISS_IN		(1 << 14)
+#	define	UDC_DATA_FLUSH		(1 << 13)
+#	define	UDC_ISO_ERR		(1 << 12)
+#	define	UDC_ISO_FIFO_EMPTY	(1 << 9)
+#	define	UDC_ISO_FIFO_FULL	(1 << 8)
+#	define	UDC_EP_HALTED		(1 << 6)
+#	define	UDC_STALL		(1 << 5)
+#	define	UDC_NAK			(1 << 4)
+#	define	UDC_ACK			(1 << 3)
+#	define	UDC_FIFO_EN		(1 << 2)
+#	define	UDC_NON_ISO_FIFO_EMPTY	(1 << 1)
+#	define	UDC_NON_ISO_FIFO_FULL	(1 << 0)
+#define	UDC_RXFSTAT			(UDC_BASE + 0x14)	/* OUT bytecount */
+#define	UDC_SYSCON1			(UDC_BASE + 0x18)	/* System config 1 */
+#	define	UDC_CFG_LOCK		(1 << 8)
+#	define	UDC_DATA_ENDIAN		(1 << 7)
+#	define	UDC_DMA_ENDIAN		(1 << 6)
+#	define	UDC_NAK_EN		(1 << 4)
+#	define	UDC_AUTODECODE_DIS	(1 << 3)
+#	define	UDC_SELF_PWR		(1 << 2)
+#	define	UDC_SOFF_DIS		(1 << 1)
+#	define	UDC_PULLUP_EN		(1 << 0)
+#define	UDC_SYSCON2			(UDC_BASE + 0x1C)	/* System config 2 */
+#	define	UDC_RMT_WKP		(1 << 6)
+#	define	UDC_STALL_CMD		(1 << 5)
+#	define	UDC_DEV_CFG		(1 << 3)
+#	define	UDC_CLR_CFG		(1 << 2)
+#define	UDC_DEVSTAT			(UDC_BASE + 0x20)	/* Device status */
+#	define	UDC_B_HNP_ENABLE	(1 << 9)
+#	define	UDC_A_HNP_SUPPORT	(1 << 8)
+#	define	UDC_A_ALT_HNP_SUPPORT	(1 << 7)
+#	define	UDC_R_WK_OK		(1 << 6)
+#	define	UDC_USB_RESET		(1 << 5)
+#	define	UDC_SUS			(1 << 4)
+#	define	UDC_CFG			(1 << 3)
+#	define	UDC_ADD			(1 << 2)
+#	define	UDC_DEF			(1 << 1)
+#	define	UDC_ATT			(1 << 0)
+#define	UDC_SOF				(UDC_BASE + 0x24)	/* Start of frame */
+#	define	UDC_FT_LOCK		(1 << 12)
+#	define	UDC_TS_OK		(1 << 11)
+#	define	UDC_TS			0x03ff
+#define	UDC_IRQ_EN			(UDC_BASE + 0x28)	/* Interrupt enable */
+#	define	UDC_SOF_IE		(1 << 7)
+#	define	UDC_EPN_RX_IE		(1 << 5)
+#	define	UDC_EPN_TX_IE		(1 << 4)
+#	define	UDC_DS_CHG_IE		(1 << 3)
+#	define	UDC_EP0_IE		(1 << 0)
+#define	UDC_DMA_IRQ_EN			(UDC_BASE + 0x2C)	/* DMA irq enable */
+	/* rx/tx dma channels numbered 1-3 not 0-2 */
+#	define	UDC_TX_DONE_IE(n)	(1 << (4 * (n) - 2))
+#	define	UDC_RX_CNT_IE(n)	(1 << (4 * (n) - 3))
+#	define	UDC_RX_EOT_IE(n)	(1 << (4 * (n) - 4))
+#define	UDC_IRQ_SRC			(UDC_BASE + 0x30)	/* Interrupt source */
+#	define	UDC_TXN_DONE		(1 << 10)
+#	define	UDC_RXN_CNT		(1 << 9)
+#	define	UDC_RXN_EOT		(1 << 8)
+#	define	UDC_IRQ_SOF		(1 << 7)
+#	define	UDC_EPN_RX		(1 << 5)
+#	define	UDC_EPN_TX		(1 << 4)
+#	define	UDC_DS_CHG		(1 << 3)
+#	define	UDC_SETUP		(1 << 2)
+#	define	UDC_EP0_RX		(1 << 1)
+#	define	UDC_EP0_TX		(1 << 0)
+#	define	UDC_IRQ_SRC_MASK	0x7bf
+#define	UDC_EPN_STAT			(UDC_BASE + 0x34)	/* EP irq status */
+#define	UDC_DMAN_STAT			(UDC_BASE + 0x38)	/* DMA irq status */
+#	define	UDC_DMA_RX_SB		(1 << 12)
+#	define	UDC_DMA_RX_SRC(x)	(((x)>>8) & 0xf)
+#	define	UDC_DMA_TX_SRC(x)	(((x)>>0) & 0xf)
+
+
+/* DMA configuration registers:  up to three channels in each direction.  */
+#define	UDC_RXDMA_CFG			(UDC_BASE + 0x40)	/* 3 eps for RX DMA */
+#	define	UDC_DMA_REQ		(1 << 12)
+#define	UDC_TXDMA_CFG			(UDC_BASE + 0x44)	/* 3 eps for TX DMA */
+#define	UDC_DATA_DMA			(UDC_BASE + 0x48)	/* rx/tx fifo addr */
+
+/* rx/tx dma control, numbering channels 1-3 not 0-2 */
+#define	UDC_TXDMA(chan)			(UDC_BASE + 0x50 - 4 + 4 * (chan))
+#	define UDC_TXN_EOT		(1 << 15)	/* bytes vs packets */
+#	define UDC_TXN_START		(1 << 14)	/* start transfer */
+#	define UDC_TXN_TSC		0x03ff		/* units in xfer */
+#define	UDC_RXDMA(chan)			(UDC_BASE + 0x60 - 4 + 4 * (chan))
+#	define UDC_RXN_STOP		(1 << 15)	/* enable EOT irq */
+#	define UDC_RXN_TC		0x00ff		/* packets in xfer */
+
+
+/*
+ * Endpoint configuration registers (used before CFG_LOCK is set)
+ * UDC_EP_TX(0) is unused
+ */
+#define	UDC_EP_RX(endpoint)		(UDC_BASE + 0x80 + (endpoint)*4)
+#	define	UDC_EPN_RX_VALID	(1 << 15)
+#	define	UDC_EPN_RX_DB		(1 << 14)
+	/* buffer size in bits 13, 12 */
+#	define	UDC_EPN_RX_ISO		(1 << 11)
+	/* buffer pointer in low 11 bits */
+#define	UDC_EP_TX(endpoint)		(UDC_BASE + 0xc0 + (endpoint)*4)
+	/* same bitfields as in RX */
+
+/*-------------------------------------------------------------------------*/
+
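+/* One queued transfer:  wraps the gadget-level usb_request plus the
+ * bookkeeping needed for DMA mapping.
+ */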
+struct omap_req {
+	struct usb_request		req;
+	struct list_head		queue;
+	unsigned			dma_bytes;
+	unsigned			mapped:1;
+};
+
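+/* Per-endpoint state; ep[0] of the omap_udc array is the control endpoint,
+ * OUT endpoints occupy ep[1..15], and their IN counterparts ep[17..31].
+ */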
+struct omap_ep {
+	struct usb_ep			ep;
+	struct list_head		queue;
+	unsigned long			irqs;
+	struct list_head		iso;
+	const struct usb_endpoint_descriptor	*desc;
+	char				name[14];
+	u16				maxpacket;
+	u8				bEndpointAddress;
+	u8				bmAttributes;
+	unsigned			double_buf:1;
+	unsigned			stopped:1;
+	unsigned			fnf:1;
+	unsigned			has_dma:1;
+	u8				ackwait;
+	u8				dma_channel;
+	u16				dma_counter;
+	int				lch;
+	struct omap_udc			*udc;
+	struct timer_list		timer;
+};
+
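+/* Controller-wide state; "lock" guards it and is dropped around gadget
+ * driver callbacks (setup, disconnect, suspend, resume).
+ */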
+struct omap_udc {
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+	spinlock_t			lock;
+	struct omap_ep			ep[32];
+	u16				devstat;
+	u16				clr_halt;
+	struct usb_phy			*transceiver;
+	struct list_head		iso;
+	unsigned			softconnect:1;
+	unsigned			vbus_active:1;
+	unsigned			ep0_pending:1;
+	unsigned			ep0_in:1;
+	unsigned			ep0_set_config:1;
+	unsigned			ep0_reset_config:1;
+	unsigned			ep0_setup:1;
+	struct completion		*done;
+	struct clk			*dc_clk;
+	struct clk			*hhc_clk;
+	unsigned			clk_requested:1;
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef VERBOSE
+#    define VDBG		DBG
+#else
+#    define VDBG(stuff...)	do{}while(0)
+#endif
+
+#define ERR(stuff...)		pr_err("udc: " stuff)
+#define WARNING(stuff...)	pr_warning("udc: " stuff)
+#define INFO(stuff...)		pr_info("udc: " stuff)
+#define DBG(stuff...)		pr_debug("udc: " stuff)
+
+/*-------------------------------------------------------------------------*/
+
+/* MOD_CONF_CTRL_0 */
+#define VBUS_W2FC_1510		(1 << 17)	/* 0 gpio0, 1 dvdd2 pin */
+
+/* FUNC_MUX_CTRL_0 */
+#define	VBUS_CTRL_1510		(1 << 19)	/* 1 connected (software) */
+#define	VBUS_MODE_1510		(1 << 18)	/* 0 hardware, 1 software */
+
+#define	HMC_1510	((omap_readl(MOD_CONF_CTRL_0) >> 1) & 0x3f)
+#define	HMC_1610	(omap_readl(OTG_SYSCON_2) & 0x3f)
+#define	HMC		(cpu_is_omap15xx() ? HMC_1510 : HMC_1610)
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pch_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pch_udc.c
new file mode 100644
index 0000000..6530706
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pch_udc.c
@@ -0,0 +1,3310 @@
+/*
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+
+/* GPIO port for VBUS detection */
+static int vbus_gpio_port = -1;		/* GPIO port number (-1:Not used) */
+
+#define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
+#define PCH_VBUS_INTERVAL	10	/* VBUS polling interval (msec) */
+
+/* Address offset of Registers */
+#define UDC_EP_REG_SHIFT	0x20	/* Offset to next EP */
+
+#define UDC_EPCTL_ADDR		0x00	/* Endpoint control */
+#define UDC_EPSTS_ADDR		0x04	/* Endpoint status */
+#define UDC_BUFIN_FRAMENUM_ADDR	0x08	/* buffer size in / frame number out */
+#define UDC_BUFOUT_MAXPKT_ADDR	0x0C	/* buffer size out / maxpkt in */
+#define UDC_SUBPTR_ADDR		0x10	/* setup buffer pointer */
+#define UDC_DESPTR_ADDR		0x14	/* Data descriptor pointer */
+#define UDC_CONFIRM_ADDR	0x18	/* Write/Read confirmation */
+
+#define UDC_DEVCFG_ADDR		0x400	/* Device configuration */
+#define UDC_DEVCTL_ADDR		0x404	/* Device control */
+#define UDC_DEVSTS_ADDR		0x408	/* Device status */
+#define UDC_DEVIRQSTS_ADDR	0x40C	/* Device irq status */
+#define UDC_DEVIRQMSK_ADDR	0x410	/* Device irq mask */
+#define UDC_EPIRQSTS_ADDR	0x414	/* Endpoint irq status */
+#define UDC_EPIRQMSK_ADDR	0x418	/* Endpoint irq mask */
+#define UDC_DEVLPM_ADDR		0x41C	/* LPM control / status */
+#define UDC_CSR_BUSY_ADDR	0x4f0	/* UDC_CSR_BUSY Status register */
+#define UDC_SRST_ADDR		0x4fc	/* SOFT RESET register */
+#define UDC_CSR_ADDR		0x500	/* USB_DEVICE endpoint register */
+
+/* Endpoint control register */
+/* Bit position */
+#define UDC_EPCTL_MRXFLUSH		(1 << 12)
+#define UDC_EPCTL_RRDY			(1 << 9)
+#define UDC_EPCTL_CNAK			(1 << 8)
+#define UDC_EPCTL_SNAK			(1 << 7)
+#define UDC_EPCTL_NAK			(1 << 6)
+#define UDC_EPCTL_P			(1 << 3)
+#define UDC_EPCTL_F			(1 << 1)
+#define UDC_EPCTL_S			(1 << 0)
+#define UDC_EPCTL_ET_SHIFT		4
+/* Mask pattern */
+#define UDC_EPCTL_ET_MASK		0x00000030
+/* Value for ET field */
+#define UDC_EPCTL_ET_CONTROL		0
+#define UDC_EPCTL_ET_ISO		1
+#define UDC_EPCTL_ET_BULK		2
+#define UDC_EPCTL_ET_INTERRUPT		3
+
+/* Endpoint status register */
+/* Bit position */
+#define UDC_EPSTS_XFERDONE		(1 << 27)
+#define UDC_EPSTS_RSS			(1 << 26)
+#define UDC_EPSTS_RCS			(1 << 25)
+#define UDC_EPSTS_TXEMPTY		(1 << 24)
+#define UDC_EPSTS_TDC			(1 << 10)
+#define UDC_EPSTS_HE			(1 << 9)
+#define UDC_EPSTS_MRXFIFO_EMP		(1 << 8)
+#define UDC_EPSTS_BNA			(1 << 7)
+#define UDC_EPSTS_IN			(1 << 6)
+#define UDC_EPSTS_OUT_SHIFT		4
+/* Mask pattern */
+#define UDC_EPSTS_OUT_MASK		0x00000030
+#define UDC_EPSTS_ALL_CLR_MASK		0x1F0006F0
+/* Value for OUT field */
+#define UDC_EPSTS_OUT_SETUP		2
+#define UDC_EPSTS_OUT_DATA		1
+
+/* Device configuration register */
+/* Bit position */
+#define UDC_DEVCFG_CSR_PRG		(1 << 17)
+#define UDC_DEVCFG_SP			(1 << 3)
+/* SPD value */
+#define UDC_DEVCFG_SPD_HS		0x0
+#define UDC_DEVCFG_SPD_FS		0x1
+#define UDC_DEVCFG_SPD_LS		0x2
+
+/* Device control register */
+/* Bit position */
+#define UDC_DEVCTL_THLEN_SHIFT		24
+#define UDC_DEVCTL_BRLEN_SHIFT		16
+#define UDC_DEVCTL_CSR_DONE		(1 << 13)
+#define UDC_DEVCTL_SD			(1 << 10)
+#define UDC_DEVCTL_MODE			(1 << 9)
+#define UDC_DEVCTL_BREN			(1 << 8)
+#define UDC_DEVCTL_THE			(1 << 7)
+#define UDC_DEVCTL_DU			(1 << 4)
+#define UDC_DEVCTL_TDE			(1 << 3)
+#define UDC_DEVCTL_RDE			(1 << 2)
+#define UDC_DEVCTL_RES			(1 << 0)
+
+/* Device status register */
+/* Bit position */
+#define UDC_DEVSTS_TS_SHIFT		18
+#define UDC_DEVSTS_ENUM_SPEED_SHIFT	13
+#define UDC_DEVSTS_ALT_SHIFT		8
+#define UDC_DEVSTS_INTF_SHIFT		4
+#define UDC_DEVSTS_CFG_SHIFT		0
+/* Mask pattern */
+#define UDC_DEVSTS_TS_MASK		0xfffc0000
+#define UDC_DEVSTS_ENUM_SPEED_MASK	0x00006000
+#define UDC_DEVSTS_ALT_MASK		0x00000f00
+#define UDC_DEVSTS_INTF_MASK		0x000000f0
+#define UDC_DEVSTS_CFG_MASK		0x0000000f
+/* Values for the ENUM_SPEED field */
+#define UDC_DEVSTS_ENUM_SPEED_FULL	1
+#define UDC_DEVSTS_ENUM_SPEED_HIGH	0
+#define UDC_DEVSTS_ENUM_SPEED_LOW	2
+#define UDC_DEVSTS_ENUM_SPEED_FULLX	3
+
+/* Device irq register */
+/* Bit position */
+#define UDC_DEVINT_RWKP			(1 << 7)
+#define UDC_DEVINT_ENUM			(1 << 6)
+#define UDC_DEVINT_SOF			(1 << 5)
+#define UDC_DEVINT_US			(1 << 4)
+#define UDC_DEVINT_UR			(1 << 3)
+#define UDC_DEVINT_ES			(1 << 2)
+#define UDC_DEVINT_SI			(1 << 1)
+#define UDC_DEVINT_SC			(1 << 0)
+/* Mask pattern */
+#define UDC_DEVINT_MSK			0x7f
+
+/* Endpoint irq register */
+/* Bit position */
+#define UDC_EPINT_IN_SHIFT		0
+#define UDC_EPINT_OUT_SHIFT		16
+#define UDC_EPINT_IN_EP0		(1 << 0)
+#define UDC_EPINT_OUT_EP0		(1 << 16)
+/* Mask pattern */
+#define UDC_EPINT_MSK_DISABLE_ALL	0xffffffff
+
+/* UDC_CSR_BUSY Status register */
+/* Bit position */
+#define UDC_CSR_BUSY			(1 << 0)
+
+/* SOFT RESET register */
+/* Bit position */
+#define UDC_PSRST			(1 << 1)
+#define UDC_SRST			(1 << 0)
+
+/* USB_DEVICE endpoint register */
+/* Bit position */
+#define UDC_CSR_NE_NUM_SHIFT		0
+#define UDC_CSR_NE_DIR_SHIFT		4
+#define UDC_CSR_NE_TYPE_SHIFT		5
+#define UDC_CSR_NE_CFG_SHIFT		7
+#define UDC_CSR_NE_INTF_SHIFT		11
+#define UDC_CSR_NE_ALT_SHIFT		15
+#define UDC_CSR_NE_MAX_PKT_SHIFT	19
+/* Mask pattern */
+#define UDC_CSR_NE_NUM_MASK		0x0000000f
+#define UDC_CSR_NE_DIR_MASK		0x00000010
+#define UDC_CSR_NE_TYPE_MASK		0x00000060
+#define UDC_CSR_NE_CFG_MASK		0x00000780
+#define UDC_CSR_NE_INTF_MASK		0x00007800
+#define UDC_CSR_NE_ALT_MASK		0x00078000
+#define UDC_CSR_NE_MAX_PKT_MASK		0x3ff80000
+
+#define PCH_UDC_CSR(ep)	(UDC_CSR_ADDR + ep*4)
+#define PCH_UDC_EPINT(in, num)\
+		(1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
+
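+/*
+ * Illustrative examples of the interrupt bit layout (IN endpoints occupy
+ * the low 16 bits, OUT endpoints the high 16 bits):
+ *   PCH_UDC_EPINT(1, 1) == (1 << 1)  == 0x00000002   IN  ep1
+ *   PCH_UDC_EPINT(0, 1) == (1 << 17) == 0x00020000   OUT ep1
+ */
+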
+/* Index of endpoint */
+#define UDC_EP0IN_IDX		0
+#define UDC_EP0OUT_IDX		1
+#define UDC_EPIN_IDX(ep)	(ep * 2)
+#define UDC_EPOUT_IDX(ep)	(ep * 2 + 1)
+#define PCH_UDC_EP0		0
+#define PCH_UDC_EP1		1
+#define PCH_UDC_EP2		2
+#define PCH_UDC_EP3		3
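+
+/*
+ * Illustrative mapping of the indices above: the IN and OUT halves of the
+ * same endpoint number sit next to each other in the dev->ep[] array, e.g.
+ *   UDC_EPIN_IDX(0) == 0 (UDC_EP0IN_IDX),  UDC_EPOUT_IDX(0) == 1,
+ *   UDC_EPIN_IDX(1) == 2,                  UDC_EPOUT_IDX(1) == 3.
+ */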
+
+/* Number of endpoint */
+#define PCH_UDC_EP_NUM		32	/* Total number of EPs (16 IN,16 OUT) */
+#define PCH_UDC_USED_EP_NUM	4	/* EP number of EP's really used */
+/* Length Value */
+#define PCH_UDC_BRLEN		0x0F	/* Burst length */
+#define PCH_UDC_THLEN		0x1F	/* Threshold length */
+/* Value of EP Buffer Size */
+#define UDC_EP0IN_BUFF_SIZE	16
+#define UDC_EPIN_BUFF_SIZE	256
+#define UDC_EP0OUT_BUFF_SIZE	16
+#define UDC_EPOUT_BUFF_SIZE	256
+/* Value of EP maximum packet size */
+#define UDC_EP0IN_MAX_PKT_SIZE	64
+#define UDC_EP0OUT_MAX_PKT_SIZE	64
+#define UDC_BULK_MAX_PKT_SIZE	512
+
+/* DMA */
+#define DMA_DIR_RX		1	/* DMA for data receive */
+#define DMA_DIR_TX		2	/* DMA for data transmit */
+#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
+#define UDC_DMA_MAXPACKET	65536	/* maximum packet size for DMA */
+
+/**
+ * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
+ *				  for data
+ * @status:		Status quadlet
+ * @reserved:		Reserved
+ * @dataptr:		DMA address of the data buffer
+ * @next:		DMA address of the next descriptor
+ */
+struct pch_udc_data_dma_desc {
+	u32 status;
+	u32 reserved;
+	u32 dataptr;
+	u32 next;
+};
+
+/**
+ * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
+ *				 for control data
+ * @status:	Status
+ * @reserved:	Reserved
+ * @request:	embedded setup request (8 bytes)
+ */
+struct pch_udc_stp_dma_desc {
+	u32 status;
+	u32 reserved;
+	struct usb_ctrlrequest request;
+} __attribute((packed));
+
+/* DMA status definitions */
+/* Buffer status */
+#define PCH_UDC_BUFF_STS	0xC0000000
+#define PCH_UDC_BS_HST_RDY	0x00000000
+#define PCH_UDC_BS_DMA_BSY	0x40000000
+#define PCH_UDC_BS_DMA_DONE	0x80000000
+#define PCH_UDC_BS_HST_BSY	0xC0000000
+/*  Rx/Tx Status */
+#define PCH_UDC_RXTX_STS	0x30000000
+#define PCH_UDC_RTS_SUCC	0x00000000
+#define PCH_UDC_RTS_DESERR	0x10000000
+#define PCH_UDC_RTS_BUFERR	0x30000000
+/* Last Descriptor Indication */
+#define PCH_UDC_DMA_LAST	0x08000000
+/* Number of Rx/Tx Bytes Mask */
+#define PCH_UDC_RXTX_BYTES	0x0000ffff
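+
+/*
+ * Illustrative decoding of a data descriptor status word using the masks
+ * above: a successfully completed 64-byte transfer would read
+ *   0x80000040 == PCH_UDC_BS_DMA_DONE | PCH_UDC_RTS_SUCC | 64 bytes.
+ */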
+
+/**
+ * struct pch_udc_cfg_data - Structure to hold current configuration
+ *			     and interface information
+ * @cur_cfg:	current configuration in use
+ * @cur_intf:	current interface in use
+ * @cur_alt:	current alt interface in use
+ */
+struct pch_udc_cfg_data {
+	u16 cur_cfg;
+	u16 cur_intf;
+	u16 cur_alt;
+};
+
+/**
+ * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
+ * @ep:			embedded usb endpoint (usb_ep)
+ * @td_stp_phys:	physical address of the setup descriptor
+ * @td_data_phys:	physical address of the data descriptor
+ * @td_stp:		DMA descriptor for setup requests
+ * @td_data:		DMA descriptor for data requests
+ * @dev:		reference to device struct
+ * @offset_addr:	offset address of ep register
+ * @desc:		endpoint descriptor for this ep
+ * @queue:		queue for requests
+ * @num:		endpoint number
+ * @in:			endpoint is IN
+ * @halted:		endpoint halted?
+ * @epsts:		Endpoint status
+ */
+struct pch_udc_ep {
+	struct usb_ep			ep;
+	dma_addr_t			td_stp_phys;
+	dma_addr_t			td_data_phys;
+	struct pch_udc_stp_dma_desc	*td_stp;
+	struct pch_udc_data_dma_desc	*td_data;
+	struct pch_udc_dev		*dev;
+	unsigned long			offset_addr;
+	const struct usb_endpoint_descriptor	*desc;
+	struct list_head		queue;
+	unsigned			num:5,
+					in:1,
+					halted:1;
+	unsigned long			epsts;
+};
+
+/**
+ * struct pch_vbus_gpio_data - Structure holding GPIO information
+ *					for detecting VBUS
+ * @port:		gpio port number
+ * @intr:		gpio interrupt number
+ * @irq_work_fall:	Work item run when VBUS falls
+ * @irq_work_rise:	Work item run when VBUS rises
+ */
+struct pch_vbus_gpio_data {
+	int			port;
+	int			intr;
+	struct work_struct	irq_work_fall;
+	struct work_struct	irq_work_rise;
+};
+
+/**
+ * struct pch_udc_dev - Structure holding complete information
+ *			of the PCH USB device
+ * @gadget:		gadget driver data
+ * @driver:		reference to gadget driver bound
+ * @pdev:		reference to the PCI device
+ * @ep:			array of endpoints
+ * @lock:		protects all state
+ * @active:		enabled the PCI device
+ * @stall:		stall requested
+ * @prot_stall:		protocol stall requested
+ * @irq_registered:	irq registered with system
+ * @mem_region:		device memory mapped
+ * @registered:		driver registered with system
+ * @suspended:		driver in suspended state
+ * @connected:		gadget driver associated
+ * @vbus_session:	required vbus_session state
+ * @set_cfg_not_acked:	pending acknowledgement for setup
+ * @waiting_zlp_ack:	pending acknowledgement for ZLP
+ * @data_requests:	DMA pool for data requests
+ * @stp_requests:	DMA pool for setup requests
+ * @dma_addr:		DMA address of the ep0 OUT receive buffer
+ * @ep0out_buf:		Buffer for DMA
+ * @setup_data:		Received setup data
+ * @phys_addr:		physical address of device memory
+ * @base_addr:		base address of mapped device memory
+ * @irq:		IRQ line for the device
+ * @cfg_data:		current cfg, intf, and alt in use
+ * @vbus_gpio:		GPIO information for detecting VBUS
+ */
+struct pch_udc_dev {
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+	struct pci_dev			*pdev;
+	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
+	spinlock_t			lock; /* protects all state */
+	unsigned	active:1,
+			stall:1,
+			prot_stall:1,
+			irq_registered:1,
+			mem_region:1,
+			registered:1,
+			suspended:1,
+			connected:1,
+			vbus_session:1,
+			set_cfg_not_acked:1,
+			waiting_zlp_ack:1;
+	struct pci_pool		*data_requests;
+	struct pci_pool		*stp_requests;
+	dma_addr_t			dma_addr;
+	void				*ep0out_buf;
+	struct usb_ctrlrequest		setup_data;
+	unsigned long			phys_addr;
+	void __iomem			*base_addr;
+	unsigned			irq;
+	struct pch_udc_cfg_data		cfg_data;
+	struct pch_vbus_gpio_data	vbus_gpio;
+};
+
+#define PCH_UDC_PCI_BAR			1
+#define PCI_DEVICE_ID_INTEL_EG20T_UDC	0x8808
+#define PCI_VENDOR_ID_ROHM		0x10DB
+#define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
+#define PCI_DEVICE_ID_ML7831_IOH_UDC	0x8808
+
+static const char	ep0_string[] = "ep0in";
+static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
+struct pch_udc_dev *pch_udc;		/* pointer to device object */
+static bool speed_fs;
+module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
+MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
+
+/**
+ * struct pch_udc_request - Structure holding a PCH USB device request packet
+ * @req:		embedded usb request
+ * @td_data_phys:	physical address of the first DMA descriptor
+ * @td_data:		first dma desc. of chain
+ * @td_data_last:	last dma desc. of chain
+ * @queue:		associated queue
+ * @dma_going:		DMA in progress for request
+ * @dma_mapped:		DMA memory mapped for request
+ * @dma_done:		DMA completed for request
+ * @chain_len:		chain length
+ * @buf:		Buffer memory for align adjustment
+ * @dma:		DMA memory for align adjustment
+ */
+struct pch_udc_request {
+	struct usb_request		req;
+	dma_addr_t			td_data_phys;
+	struct pch_udc_data_dma_desc	*td_data;
+	struct pch_udc_data_dma_desc	*td_data_last;
+	struct list_head		queue;
+	unsigned			dma_going:1,
+					dma_mapped:1,
+					dma_done:1;
+	unsigned			chain_len;
+	void				*buf;
+	dma_addr_t			dma;
+};
+
+static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
+{
+	return ioread32(dev->base_addr + reg);
+}
+
+static inline void pch_udc_writel(struct pch_udc_dev *dev,
+				    unsigned long val, unsigned long reg)
+{
+	iowrite32(val, dev->base_addr + reg);
+}
+
+static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
+				     unsigned long reg,
+				     unsigned long bitmask)
+{
+	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
+}
+
+static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
+				     unsigned long reg,
+				     unsigned long bitmask)
+{
+	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
+}
+
+static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
+{
+	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
+}
+
+static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
+				    unsigned long val, unsigned long reg)
+{
+	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
+}
+
+static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
+				     unsigned long reg,
+				     unsigned long bitmask)
+{
+	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
+}
+
+static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
+				     unsigned long reg,
+				     unsigned long bitmask)
+{
+	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
+}
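+
+/*
+ * Note: the helpers above are plain read-modify-write accesses to MMIO
+ * registers and are not atomic; callers are expected to serialize them
+ * (typically under dev->lock).  For example,
+ *   pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+ * ORs the resume (RES) bit into the device control register.
+ */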
+
+/**
+ * pch_udc_csr_busy() - Wait till idle.
+ * @dev:	Reference to pch_udc_dev structure
+ */
+static void pch_udc_csr_busy(struct pch_udc_dev *dev)
+{
+	unsigned int count = 200;
+
+	/* Wait till idle */
+	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
+		&& --count)
+		cpu_relax();
+	if (!count)
+		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
+}
+
+/**
+ * pch_udc_write_csr() - Write the command and status registers.
+ * @dev:	Reference to pch_udc_dev structure
+ * @val:	value to be written to CSR register
+ * @ep:		index of the endpoint CSR register
+ */
+static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
+			       unsigned int ep)
+{
+	unsigned long reg = PCH_UDC_CSR(ep);
+
+	pch_udc_csr_busy(dev);		/* Wait till idle */
+	pch_udc_writel(dev, val, reg);
+	pch_udc_csr_busy(dev);		/* Wait till idle */
+}
+
+/**
+ * pch_udc_read_csr() - Read the command and status registers.
+ * @dev:	Reference to pch_udc_dev structure
+ * @ep:		index of the endpoint CSR register
+ *
+ * Return codes:	content of CSR register
+ */
+static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
+{
+	unsigned long reg = PCH_UDC_CSR(ep);
+
+	pch_udc_csr_busy(dev);		/* Wait till idle */
+	pch_udc_readl(dev, reg);	/* Dummy read */
+	pch_udc_csr_busy(dev);		/* Wait till idle */
+	return pch_udc_readl(dev, reg);
+}
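+
+/*
+ * Note: every CSR access above goes through pch_udc_csr_busy() so that the
+ * previous CSR transaction has completed; reads additionally issue a dummy
+ * read before returning the live register value.
+ */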
+
+/**
+ * pch_udc_rmt_wakeup() - Initiate for remote wakeup
+ * @dev:	Reference to pch_udc_dev structure
+ */
+static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
+{
+	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+	mdelay(1);
+	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+}
+
+/**
+ * pch_udc_get_frame() - Get the current frame from device status register
+ * @dev:	Reference to pch_udc_dev structure
+ * Return:	current frame number
+ */
+static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
+{
+	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
+	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
+}
+
+/**
+ * pch_udc_clear_selfpowered() - Clear the self power control
+ * @dev:	Reference to pch_udc_regs structure
+ */
+static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
+{
+	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
+}
+
+/**
+ * pch_udc_set_selfpowered() - Set the self power control
+ * @dev:	Reference to pch_udc_regs structure
+ */
+static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
+{
+	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
+}
+
+/**
+ * pch_udc_set_disconnect() - Set the disconnect status.
+ * @dev:	Reference to pch_udc_regs structure
+ */
+static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
+{
+	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
+}
+
+/**
+ * pch_udc_clear_disconnect() - Clear the disconnect status.
+ * @dev:	Reference to pch_udc_regs structure
+ */
+static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
+{
+	/* Clear the disconnect */
+	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
+	mdelay(1);
+	/* Resume USB signalling */
+	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+}
+
+/**
+ * pch_udc_reconnect() - This API initializes the usb device controller
+ *						and clears the disconnect status.
+ * @dev:		Reference to pch_udc_regs structure
+ */
+static void pch_udc_init(struct pch_udc_dev *dev);
+static void pch_udc_reconnect(struct pch_udc_dev *dev)
+{
+	pch_udc_init(dev);
+
+	/* enable device interrupts */
+	/* pch_udc_enable_interrupts() */
+	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
+			UDC_DEVINT_UR | UDC_DEVINT_ENUM);
+
+	/* Clear the disconnect */
+	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
+	mdelay(1);
+	/* Resume USB signalling */
+	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+}
+
+/**
+ * pch_udc_vbus_session() - Set or clear the disconnect status.
+ * @dev:	Reference to pch_udc_regs structure
+ * @is_active:	Parameter specifying the action
+ *		  0:   indicating VBUS power is ending
+ *		  !0:  indicating VBUS power is starting
+ */
+static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
+					  int is_active)
+{
+	if (is_active) {
+		pch_udc_reconnect(dev);
+		dev->vbus_session = 1;
+	} else {
+		if (dev->driver && dev->driver->disconnect) {
+			spin_unlock(&dev->lock);
+			dev->driver->disconnect(&dev->gadget);
+			spin_lock(&dev->lock);
+		}
+		pch_udc_set_disconnect(dev);
+		dev->vbus_session = 0;
+	}
+}
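+
+/*
+ * Note: the spin_unlock()/spin_lock() pair above implies the caller holds
+ * dev->lock; the lock is dropped only around the gadget driver's
+ * ->disconnect() callback.
+ */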
+
+/**
+ * pch_udc_ep_set_stall() - Set the stall of endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
+{
+	if (ep->in) {
+		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
+		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
+	} else {
+		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
+	}
+}
+
+/**
+ * pch_udc_ep_clear_stall() - Clear the stall of endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
+{
+	/* Clear the stall */
+	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
+	/* Clear NAK by writing CNAK */
+	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
+}
+
+/**
+ * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * @type:	Type of endpoint
+ */
+static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
+					u8 type)
+{
+	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
+				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * @buf_size:	The buffer word size
+ * @ep_in:	Endpoint direction (non-zero for IN)
+ */
+static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
+						 u32 buf_size, u32 ep_in)
+{
+	u32 data;
+	if (ep_in) {
+		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
+		data = (data & 0xffff0000) | (buf_size & 0xffff);
+		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
+	} else {
+		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
+		data = (buf_size << 16) | (data & 0xffff);
+		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
+	}
+}
+
+/**
+ * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * @pkt_size:	The packet byte size
+ */
+static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
+{
+	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
+	data = (data & 0xffff0000) | (pkt_size & 0xffff);
+	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * @addr:	Address of the setup buffer
+ */
+static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
+{
+	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * @addr:	Address of the data descriptor
+ */
+static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
+{
+	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
+{
+	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
+}
+
+/**
+ * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
+{
+	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
+}
+
+/**
+ * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
+{
+	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
+}
+
+/**
+ * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
+ *			register depending on the direction specified
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @dir:	whether Tx or Rx
+ *		  DMA_DIR_RX: Receive
+ *		  DMA_DIR_TX: Transmit
+ */
+static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
+{
+	if (dir == DMA_DIR_RX)
+		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
+	else if (dir == DMA_DIR_TX)
+		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
+}
+
+/**
+ * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
+ *				 register depending on the direction specified
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @dir:	Whether Tx or Rx
+ *		  DMA_DIR_RX: Receive
+ *		  DMA_DIR_TX: Transmit
+ */
+static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
+{
+	if (dir == DMA_DIR_RX)
+		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
+	else if (dir == DMA_DIR_TX)
+		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
+}
+
+/**
+ * pch_udc_set_csr_done() - Set the device control register
+ *				CSR done field (bit 13)
+ * @dev:	reference to structure of type pch_udc_regs
+ */
+static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
+{
+	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
+}
+
+/**
+ * pch_udc_disable_interrupts() - Disables the specified interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @mask:	Mask to disable interrupts
+ */
+static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
+					    u32 mask)
+{
+	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
+}
+
+/**
+ * pch_udc_enable_interrupts() - Enable the specified interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @mask:	Mask to enable interrupts
+ */
+static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
+					   u32 mask)
+{
+	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
+}
+
+/**
+ * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @mask:	Mask to disable interrupts
+ */
+static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
+						u32 mask)
+{
+	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
+}
+
+/**
+ * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @mask:	Mask to enable interrupts
+ */
+static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
+					      u32 mask)
+{
+	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
+}
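+
+/*
+ * Note on the four mask helpers above: in the UDC's interrupt mask
+ * registers a set bit masks (disables) the corresponding interrupt, which
+ * is why the "disable" helpers set bits in UDC_DEVIRQMSK/UDC_EPIRQMSK and
+ * the "enable" helpers clear them.
+ */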
+
+/**
+ * pch_udc_read_device_interrupts() - Read the device interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * Return:	The device interrupt status
+ */
+static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
+{
+	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
+}
+
+/**
+ * pch_udc_write_device_interrupts() - Write device interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @val:	The value to be written to interrupt register
+ */
+static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
+						     u32 val)
+{
+	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
+}
+
+/**
+ * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * Return:	The endpoint interrupt status
+ */
+static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
+{
+	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
+}
+
+/**
+ * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @val:	The value to be written to interrupt register
+ */
+static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
+					     u32 val)
+{
+	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
+}
+
+/**
+ * pch_udc_read_device_status() - Read the device status
+ * @dev:	Reference to structure of type pch_udc_regs
+ * Return:	The device status
+ */
+static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
+{
+	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
+}
+
+/**
+ * pch_udc_read_ep_control() - Read the endpoint control
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * Return:	The endpoint control register value
+ */
+static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
+{
+	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
+}
+
+/**
+ * pch_udc_clear_ep_control() - Clear the endpoint control register
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
+{
+	pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
+}
+
+/**
+ * pch_udc_read_ep_status() - Read the endpoint status
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * Return:	The endpoint status
+ */
+static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
+{
+	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
+}
+
+/**
+ * pch_udc_clear_ep_status() - Clear the endpoint status
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * @stat:	Endpoint status
+ */
+static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
+					 u32 stat)
+{
+	pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
+ *				of the endpoint control register
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
+{
+	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
+}
+
+/**
+ * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
+ *				of the endpoint control register
+ * @ep:		reference to structure of type pch_udc_ep_regs
+ */
+static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
+{
+	unsigned int loopcnt = 0;
+	struct pch_udc_dev *dev = ep->dev;
+
+	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
+		return;
+	if (!ep->in) {
+		loopcnt = 10000;
+		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
+			--loopcnt)
+			udelay(5);
+		if (!loopcnt)
+			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
+				__func__);
+	}
+	loopcnt = 10000;
+	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
+		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
+		udelay(5);
+	}
+	if (!loopcnt)
+		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
+			__func__, ep->num, (ep->in ? "in" : "out"));
+}
+
+/**
+ * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
+ * @ep:	reference to structure of type pch_udc_ep_regs
+ * @dir:	direction of endpoint
+ *		  0:  endpoint is OUT
+ *		  !0: endpoint is IN
+ */
+static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
+{
+	if (dir) {	/* IN ep */
+		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
+		return;
+	}
+}
+
+/**
+ * pch_udc_ep_enable() - This API enables the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep
+ * @cfg:	Reference to the current configuration data
+ * @desc:	endpoint descriptor
+ */
+static void pch_udc_ep_enable(struct pch_udc_ep *ep,
+			       struct pch_udc_cfg_data *cfg,
+			       const struct usb_endpoint_descriptor *desc)
+{
+	u32 val = 0;
+	u32 buff_size = 0;
+
+	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
+	if (ep->in)
+		buff_size = UDC_EPIN_BUFF_SIZE;
+	else
+		buff_size = UDC_EPOUT_BUFF_SIZE;
+	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
+	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
+	pch_udc_ep_set_nak(ep);
+	pch_udc_ep_fifo_flush(ep, ep->in);
+	/* Configure the endpoint */
+	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
+	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
+		UDC_CSR_NE_TYPE_SHIFT) |
+	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
+	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
+	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
+	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
+
+	if (ep->in)
+		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
+	else
+		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
+}
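+
+/*
+ * Worked example (hypothetical values): for a bulk IN endpoint 1 with
+ * wMaxPacketSize 512 in configuration 1, interface 0, alternate 0, the
+ * value programmed above is
+ *   1 << UDC_CSR_NE_NUM_SHIFT  | 1 << UDC_CSR_NE_DIR_SHIFT |
+ *   2 << UDC_CSR_NE_TYPE_SHIFT | 1 << UDC_CSR_NE_CFG_SHIFT |
+ *   512 << UDC_CSR_NE_MAX_PKT_SHIFT == 0x100000d1,
+ * written to the CSR slot selected by UDC_EPIN_IDX(1).
+ */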
+
+/**
+ * pch_udc_ep_disable() - This API disables the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep
+ */
+static void pch_udc_ep_disable(struct pch_udc_ep *ep)
+{
+	if (ep->in) {
+		/* flush the fifo */
+		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
+		/* set NAK */
+		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
+		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
+	} else {
+		/* set NAK */
+		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
+	}
+	/* reset desc pointer */
+	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
+}
+
+/**
+ * pch_udc_wait_ep_stall() - Wait until the endpoint stall bit is cleared.
+ * @ep:		Reference to pch_udc_ep structure
+ */
+static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
+{
+	unsigned int count = 10000;
+
+	/* Wait till idle */
+	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
+		udelay(5);
+	if (!count)
+		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
+}
+
+/**
+ * pch_udc_init() - This API initializes usb device controller
+ * @dev:	Reference to pch_udc_regs structure
+ */
+static void pch_udc_init(struct pch_udc_dev *dev)
+{
+	if (NULL == dev) {
+		pr_err("%s: Invalid address\n", __func__);
+		return;
+	}
+	/* Soft Reset and Reset PHY */
+	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
+	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
+	mdelay(1);
+	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
+	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
+	mdelay(1);
+	/* mask and clear all device interrupts */
+	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
+	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
+
+	/* mask and clear all ep interrupts */
+	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
+	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
+
+	/* enable dynamic CSR programming, self powered and device speed */
+	if (speed_fs)
+		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
+				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
+	else /* default: high speed */
+		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
+				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
+	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
+			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
+			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
+			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
+			UDC_DEVCTL_THE);
+}
+
+/**
+ * pch_udc_exit() - This API exits the usb device controller
+ * @dev:	Reference to pch_udc_regs structure
+ */
+static void pch_udc_exit(struct pch_udc_dev *dev)
+{
+	/* mask all device interrupts */
+	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
+	/* mask all ep interrupts */
+	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
+	/* put device in disconnected state */
+	pch_udc_set_disconnect(dev);
+}
+
+/**
+ * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
+ * @gadget:	Reference to the gadget driver
+ *
+ * Return codes:
+ *	current frame number:	Success
+ *	-EINVAL:	If the gadget passed is NULL
+ */
+static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
+{
+	struct pch_udc_dev	*dev;
+
+	if (!gadget)
+		return -EINVAL;
+	dev = container_of(gadget, struct pch_udc_dev, gadget);
+	return pch_udc_get_frame(dev);
+}
+
+/**
+ * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
+ * @gadget:	Reference to the gadget driver
+ *
+ * Return codes:
+ *	0:		Success
+ *	-EINVAL:	If the gadget passed is NULL
+ */
+static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
+{
+	struct pch_udc_dev	*dev;
+	unsigned long		flags;
+
+	if (!gadget)
+		return -EINVAL;
+	dev = container_of(gadget, struct pch_udc_dev, gadget);
+	spin_lock_irqsave(&dev->lock, flags);
+	pch_udc_rmt_wakeup(dev);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return 0;
+}
+
+/**
+ * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
+ *				is self powered or not
+ * @gadget:	Reference to the gadget driver
+ * @value:	Specifies self powered or not
+ *
+ * Return codes:
+ *	0:		Success
+ *	-EINVAL:	If the gadget passed is NULL
+ */
+static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
+{
+	struct pch_udc_dev	*dev;
+
+	if (!gadget)
+		return -EINVAL;
+	dev = container_of(gadget, struct pch_udc_dev, gadget);
+	if (value)
+		pch_udc_set_selfpowered(dev);
+	else
+		pch_udc_clear_selfpowered(dev);
+	return 0;
+}
+
+/**
+ * pch_udc_pcd_pullup() - This API is invoked to make the device
+ *				visible/invisible to the host
+ * @gadget:	Reference to the gadget driver
+ * @is_on:	Specifies whether the pull up is made active or inactive
+ *
+ * Return codes:
+ *	0:		Success
+ *	-EINVAL:	If the gadget passed is NULL
+ */
+static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct pch_udc_dev	*dev;
+
+	if (!gadget)
+		return -EINVAL;
+	dev = container_of(gadget, struct pch_udc_dev, gadget);
+	if (is_on) {
+		pch_udc_reconnect(dev);
+	} else {
+		if (dev->driver && dev->driver->disconnect) {
+			spin_unlock(&dev->lock);
+			dev->driver->disconnect(&dev->gadget);
+			spin_lock(&dev->lock);
+		}
+		pch_udc_set_disconnect(dev);
+	}
+
+	return 0;
+}
+
+/**
+ * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
+ *				transceiver (or GPIO) that
+ *				detects a VBUS power session starting/ending
+ * @gadget:	Reference to the gadget driver
+ * @is_active:	specifies whether the session is starting or ending
+ *
+ * Return codes:
+ *	0:		Success
+ *	-EINVAL:	If the gadget passed is NULL
+ */
+static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	struct pch_udc_dev	*dev;
+
+	if (!gadget)
+		return -EINVAL;
+	dev = container_of(gadget, struct pch_udc_dev, gadget);
+	pch_udc_vbus_session(dev, is_active);
+	return 0;
+}
+
+/**
+ * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
+ *				SET_CONFIGURATION calls to
+ *				specify how much power the device can consume
+ * @gadget:	Reference to the gadget driver
+ * @mA:		specifies the current limit in mA
+ *
+ * Return codes:
+ *	-EOPNOTSUPP:	always; limiting VBUS draw is not supported
+ */
+static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
+{
+	return -EOPNOTSUPP;
+}
+
+static int pch_udc_start(struct usb_gadget_driver *driver,
+	int (*bind)(struct usb_gadget *));
+static int pch_udc_stop(struct usb_gadget_driver *driver);
+static const struct usb_gadget_ops pch_udc_ops = {
+	.get_frame = pch_udc_pcd_get_frame,
+	.wakeup = pch_udc_pcd_wakeup,
+	.set_selfpowered = pch_udc_pcd_selfpowered,
+	.pullup = pch_udc_pcd_pullup,
+	.vbus_session = pch_udc_pcd_vbus_session,
+	.vbus_draw = pch_udc_pcd_vbus_draw,
+	.start	= pch_udc_start,
+	.stop	= pch_udc_stop,
+};
+
+/**
+ * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
+ * @dev:	Reference to the driver structure
+ *
+ * Return value:
+ *	1: VBUS is high
+ *	0: VBUS is low
+ *     -1: VBUS detection via GPIO is not enabled
+ */
+static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
+{
+	int vbus = 0;
+
+	if (dev->vbus_gpio.port)
+		vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
+	else
+		vbus = -1;
+
+	return vbus;
+}
+
+/**
+ * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
+ *                             If VBUS is Low, disconnect is processed
+ * @irq_work:	Structure for WorkQueue
+ *
+ */
+static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
+{
+	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
+		struct pch_vbus_gpio_data, irq_work_fall);
+	struct pch_udc_dev *dev =
+		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
+	int vbus_saved = -1;
+	int vbus;
+	int count;
+
+	if (!dev->vbus_gpio.port)
+		return;
+
+	for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
+		count++) {
+		vbus = pch_vbus_gpio_get_value(dev);
+
+		if ((vbus_saved == vbus) && (vbus == 0)) {
+			dev_dbg(&dev->pdev->dev, "VBUS fell");
+			if (dev->driver && dev->driver->disconnect)
+				dev->driver->disconnect(&dev->gadget);
+			if (dev->vbus_gpio.intr)
+				pch_udc_init(dev);
+			else
+				pch_udc_reconnect(dev);
+			return;
+		}
+		vbus_saved = vbus;
+		mdelay(PCH_VBUS_INTERVAL);
+	}
+}
+
+/**
+ * pch_vbus_gpio_work_rise() - This API checks whether VBUS is High.
+ *                             If VBUS is High, connect is processed
+ * @irq_work:	Structure for WorkQueue
+ *
+ */
+static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
+{
+	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
+		struct pch_vbus_gpio_data, irq_work_rise);
+	struct pch_udc_dev *dev =
+		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
+	int vbus;
+
+	if (!dev->vbus_gpio.port)
+		return;
+
+	mdelay(PCH_VBUS_INTERVAL);
+	vbus = pch_vbus_gpio_get_value(dev);
+
+	if (vbus == 1) {
+		dev_dbg(&dev->pdev->dev, "VBUS rose");
+		pch_udc_reconnect(dev);
+		return;
+	}
+}
+
+/**
+ * pch_vbus_gpio_irq() - IRQ handler for the GPIO interrupt signalling a VBUS change
+ * @irq:	Interrupt request number
+ * @dev:	Reference to the device structure
+ *
+ * Return codes:
+ *	IRQ_HANDLED:	A VBUS change was detected and handled
+ *	IRQ_NONE:	GPIO based VBUS detection is not in use
+ */
+static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
+{
+	struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
+
+	if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
+		return IRQ_NONE;
+
+	if (pch_vbus_gpio_get_value(dev))
+		schedule_work(&dev->vbus_gpio.irq_work_rise);
+	else
+		schedule_work(&dev->vbus_gpio.irq_work_fall);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
+ * @dev:	Reference to the driver structure
+ * @vbus_gpio_port:	GPIO port number used for VBUS detection
+ *
+ * Return codes:
+ *	0: Success
+ *	-EINVAL: GPIO port is invalid or can't be initialized.
+ */
+static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
+{
+	int err;
+	int irq_num = 0;
+
+	dev->vbus_gpio.port = 0;
+	dev->vbus_gpio.intr = 0;
+
+	if (vbus_gpio_port <= -1)
+		return -EINVAL;
+
+	err = gpio_is_valid(vbus_gpio_port);
+	if (!err) {
+		pr_err("%s: gpio port %d is invalid\n",
+			__func__, vbus_gpio_port);
+		return -EINVAL;
+	}
+
+	err = gpio_request(vbus_gpio_port, "pch_vbus");
+	if (err) {
+		pr_err("%s: can't request gpio port %d, err: %d\n",
+			__func__, vbus_gpio_port, err);
+		return -EINVAL;
+	}
+
+	dev->vbus_gpio.port = vbus_gpio_port;
+	gpio_direction_input(vbus_gpio_port);
+	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
+
+	irq_num = gpio_to_irq(vbus_gpio_port);
+	if (irq_num > 0) {
+		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
+		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
+			"vbus_detect", dev);
+		if (!err) {
+			dev->vbus_gpio.intr = irq_num;
+			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
+				pch_vbus_gpio_work_rise);
+		} else {
+			pr_err("%s: can't request irq %d, err: %d\n",
+				__func__, irq_num, err);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * pch_vbus_gpio_free() - This API frees resources of GPIO port
+ * @dev:	Reference to the driver structure
+ */
+static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
+{
+	if (dev->vbus_gpio.intr)
+		free_irq(dev->vbus_gpio.intr, dev);
+
+	if (dev->vbus_gpio.port)
+		gpio_free(dev->vbus_gpio.port);
+}
+
+/**
+ * complete_req() - This API is invoked from the driver when processing
+ *			of a request is complete
+ * @ep:		Reference to the endpoint structure
+ * @req:	Reference to the request structure
+ * @status:	Indicates the success/failure of completion
+ */
+static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
+								 int status)
+{
+	struct pch_udc_dev	*dev;
+	unsigned halted = ep->halted;
+
+	list_del_init(&req->queue);
+
+	/* set new status if pending */
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	dev = ep->dev;
+	if (req->dma_mapped) {
+		if (req->dma == DMA_ADDR_INVALID) {
+			if (ep->in)
+				dma_unmap_single(&dev->pdev->dev, req->req.dma,
+						 req->req.length,
+						 DMA_TO_DEVICE);
+			else
+				dma_unmap_single(&dev->pdev->dev, req->req.dma,
+						 req->req.length,
+						 DMA_FROM_DEVICE);
+			req->req.dma = DMA_ADDR_INVALID;
+		} else {
+			if (ep->in)
+				dma_unmap_single(&dev->pdev->dev, req->dma,
+						 req->req.length,
+						 DMA_TO_DEVICE);
+			else {
+				dma_unmap_single(&dev->pdev->dev, req->dma,
+						 req->req.length,
+						 DMA_FROM_DEVICE);
+				memcpy(req->req.buf, req->buf, req->req.length);
+			}
+			kfree(req->buf);
+			req->dma = DMA_ADDR_INVALID;
+		}
+		req->dma_mapped = 0;
+	}
+	ep->halted = 1;
+	spin_unlock(&dev->lock);
+	if (!ep->in)
+		pch_udc_ep_clear_rrdy(ep);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&dev->lock);
+	ep->halted = halted;
+}
+
+/**
+ * empty_req_queue() - This API empties the request queue of an endpoint
+ * @ep:		Reference to the endpoint structure
+ */
+static void empty_req_queue(struct pch_udc_ep *ep)
+{
+	struct pch_udc_request	*req;
+
+	ep->halted = 1;
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+		complete_req(ep, req, -ESHUTDOWN);	/* Remove from list */
+	}
+}
+
+/**
+ * pch_udc_free_dma_chain() - This function frees the DMA chain created
+ *				for the request
+ * @dev:	Reference to the driver structure
+ * @req:	Reference to the request whose DMA chain is to be freed
+ */
+static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
+				   struct pch_udc_request *req)
+{
+	struct pch_udc_data_dma_desc *td = req->td_data;
+	unsigned i = req->chain_len;
+
+	dma_addr_t addr2;
+	dma_addr_t addr = (dma_addr_t)td->next;
+	td->next = 0x00;
+	for (; i > 1; --i) {
+		/* do not free first desc., will be done by free for request */
+		td = phys_to_virt(addr);
+		addr2 = (dma_addr_t)td->next;
+		pci_pool_free(dev->data_requests, td, addr);
+		td->next = 0x00;
+		addr = addr2;
+	}
+	req->chain_len = 1;
+}
+
+/**
+ * pch_udc_create_dma_chain() - This function creates or reinitializes
+ *				a DMA chain
+ * @ep:		Reference to the endpoint structure
+ * @req:	Reference to the request
+ * @buf_len:	The buffer length
+ * @gfp_flags:	Flags to be used while mapping the data buffer
+ *
+ * Return codes:
+ *	0:		success,
+ *	-ENOMEM:	pci_pool_alloc invocation fails
+ */
+static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
+				    struct pch_udc_request *req,
+				    unsigned long buf_len,
+				    gfp_t gfp_flags)
+{
+	struct pch_udc_data_dma_desc *td = req->td_data, *last;
+	unsigned long bytes = req->req.length, i = 0;
+	dma_addr_t dma_addr;
+	unsigned len = 1;
+
+	if (req->chain_len > 1)
+		pch_udc_free_dma_chain(ep->dev, req);
+
+	if (req->dma == DMA_ADDR_INVALID)
+		td->dataptr = req->req.dma;
+	else
+		td->dataptr = req->dma;
+
+	td->status = PCH_UDC_BS_HST_BSY;
+	for (; ; bytes -= buf_len, ++len) {
+		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
+		if (bytes <= buf_len)
+			break;
+		last = td;
+		td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
+				    &dma_addr);
+		if (!td)
+			goto nomem;
+		i += buf_len;
+		td->dataptr = req->td_data->dataptr + i;
+		last->next = dma_addr;
+	}
+
+	req->td_data_last = td;
+	td->status |= PCH_UDC_DMA_LAST;
+	td->next = req->td_data_phys;
+	req->chain_len = len;
+	return 0;
+
+nomem:
+	if (len > 1) {
+		req->chain_len = len;
+		pch_udc_free_dma_chain(ep->dev, req);
+	}
+	req->chain_len = 1;
+	return -ENOMEM;
+}
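+
+/*
+ * Chain layout produced above: each descriptor covers at most buf_len
+ * bytes (the byte count is kept in the low bits of ->status), the final
+ * descriptor carries PCH_UDC_DMA_LAST, and its ->next wraps back to the
+ * first descriptor at td_data_phys.
+ */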
+
+/**
+ * prepare_dma() - This function creates and initializes the DMA chain
+ *			for the request
+ * @ep:		Reference to the endpoint structure
+ * @req:	Reference to the request
+ * @gfp:	Flag to be used while mapping the data buffer
+ *
+ * Return codes:
+ *	0:		Success
+ *	Non-zero:	linux error number on failure
+ */
+static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
+			  gfp_t gfp)
+{
+	int	retval;
+
+	/* Allocate and create a DMA chain */
+	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
+	if (retval) {
+		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
+		return retval;
+	}
+	if (ep->in)
+		req->td_data->status = (req->td_data->status &
+				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
+	return 0;
+}
+
+/**
+ * process_zlp() - This function processes zero length packets
+ *			from the gadget driver
+ * @ep:		Reference to the endpoint structure
+ * @req:	Reference to the request
+ */
+static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
+{
+	struct pch_udc_dev	*dev = ep->dev;
+
+	/* IN zlp's are handled by hardware */
+	complete_req(ep, req, 0);
+
+	/* if set_config or set_intf is waiting for ack by zlp
+	 * then set CSR_DONE
+	 */
+	if (dev->set_cfg_not_acked) {
+		pch_udc_set_csr_done(dev);
+		dev->set_cfg_not_acked = 0;
+	}
+	/* setup command is ACK'ed now by zlp */
+	if (!dev->stall && dev->waiting_zlp_ack) {
+		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
+		dev->waiting_zlp_ack = 0;
+	}
+}
+
+/**
+ * pch_udc_start_rxrequest() - This function starts the receive request.
+ * @ep:		Reference to the endpoint structure
+ * @req:	Reference to the request structure
+ */
+static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
+					 struct pch_udc_request *req)
+{
+	struct pch_udc_data_dma_desc *td_data;
+
+	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
+	td_data = req->td_data;
+	/* Set the status bits for all descriptors */
+	while (1) {
+		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
+				    PCH_UDC_BS_HST_RDY;
+		if ((td_data->status & PCH_UDC_DMA_LAST) ==  PCH_UDC_DMA_LAST)
+			break;
+		td_data = phys_to_virt(td_data->next);
+	}
+	/* Write the descriptor pointer */
+	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
+	req->dma_going = 1;
+	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
+	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
+	pch_udc_ep_clear_nak(ep);
+	pch_udc_ep_set_rrdy(ep);
+}
+
+/**
+ * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
+ *				from gadget driver
+ * @usbep:	Reference to the USB endpoint structure
+ * @desc:	Reference to the USB endpoint descriptor structure
+ *
+ * Return codes:
+ *	0:		Success
+ *	-EINVAL:	Invalid endpoint or descriptor
+ *	-ESHUTDOWN:	No gadget driver bound or speed unknown
+ */
+static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
+				    const struct usb_endpoint_descriptor *desc)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_dev	*dev;
+	unsigned long		iflags;
+
+	if (!usbep || (usbep->name == ep0_string) || !desc ||
+	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
+		return -EINVAL;
+
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+	spin_lock_irqsave(&dev->lock, iflags);
+	ep->desc = desc;
+	ep->halted = 0;
+	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
+	ep->ep.maxpacket = usb_endpoint_maxp(desc);
+	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+	spin_unlock_irqrestore(&dev->lock, iflags);
+	return 0;
+}
+
+/**
+ * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
+ *				from gadget driver
+ * @usbep:	Reference to the USB endpoint structure
+ *
+ * Return codes:
+ *	0:		Success
+ *	-EINVAL:	Invalid endpoint (ep0 or not enabled)
+ */
+static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_dev	*dev;
+	unsigned long	iflags;
+
+	if (!usbep)
+		return -EINVAL;
+
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	if ((usbep->name == ep0_string) || !ep->desc)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->dev->lock, iflags);
+	empty_req_queue(ep);
+	ep->halted = 1;
+	pch_udc_ep_disable(ep);
+	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+	ep->desc = NULL;
+	ep->ep.desc = NULL;
+	INIT_LIST_HEAD(&ep->queue);
+	spin_unlock_irqrestore(&ep->dev->lock, iflags);
+	return 0;
+}
+
+/**
+ * pch_udc_alloc_request() - This function allocates request structure.
+ *				It is called by gadget driver
+ * @usbep:	Reference to the USB endpoint structure
+ * @gfp:	Flag to be used while allocating memory
+ *
+ * Return codes:
+ *	NULL:			Failure
+ *	Allocated address:	Success
+ */
+static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
+						  gfp_t gfp)
+{
+	struct pch_udc_request		*req;
+	struct pch_udc_ep		*ep;
+	struct pch_udc_data_dma_desc	*dma_desc;
+	struct pch_udc_dev		*dev;
+
+	if (!usbep)
+		return NULL;
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	req = kzalloc(sizeof *req, gfp);
+	if (!req)
+		return NULL;
+	req->req.dma = DMA_ADDR_INVALID;
+	req->dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD(&req->queue);
+	if (!ep->dev->dma_addr)
+		return &req->req;
+	/* ep0 in requests are allocated from data pool here */
+	dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
+				  &req->td_data_phys);
+	if (NULL == dma_desc) {
+		kfree(req);
+		return NULL;
+	}
+	/* prevent from using desc. - set HOST BUSY */
+	dma_desc->status |= PCH_UDC_BS_HST_BSY;
+	dma_desc->dataptr = __constant_cpu_to_le32(DMA_ADDR_INVALID);
+	req->td_data = dma_desc;
+	req->td_data_last = dma_desc;
+	req->chain_len = 1;
+	return &req->req;
+}
+
+/**
+ * pch_udc_free_request() - This function frees request structure.
+ *				It is called by gadget driver
+ * @usbep:	Reference to the USB endpoint structure
+ * @usbreq:	Reference to the USB request
+ */
+static void pch_udc_free_request(struct usb_ep *usbep,
+				  struct usb_request *usbreq)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_request	*req;
+	struct pch_udc_dev	*dev;
+
+	if (!usbep || !usbreq)
+		return;
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	req = container_of(usbreq, struct pch_udc_request, req);
+	dev = ep->dev;
+	if (!list_empty(&req->queue))
+		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
+			__func__, usbep->name, req);
+	if (req->td_data != NULL) {
+		if (req->chain_len > 1)
+			pch_udc_free_dma_chain(ep->dev, req);
+		pci_pool_free(ep->dev->data_requests, req->td_data,
+			      req->td_data_phys);
+	}
+	kfree(req);
+}
+
+/**
+ * pch_udc_pcd_queue() - This function queues a request packet. It is called
+ *			by gadget driver
+ * @usbep:	Reference to the USB endpoint structure
+ * @usbreq:	Reference to the USB request
+ * @gfp:	Flag to be used while mapping the data buffer
+ *
+ * Return codes:
+ *	0:			Success
+ *	linux error number:	Failure
+ */
+static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
+								 gfp_t gfp)
+{
+	int retval = 0;
+	struct pch_udc_ep	*ep;
+	struct pch_udc_dev	*dev;
+	struct pch_udc_request	*req;
+	unsigned long	iflags;
+
+	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
+		return -EINVAL;
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	if (!ep->desc && ep->num)
+		return -EINVAL;
+	req = container_of(usbreq, struct pch_udc_request, req);
+	if (!list_empty(&req->queue))
+		return -EINVAL;
+	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+	spin_lock_irqsave(&dev->lock, iflags);
+	/* map the buffer for dma */
+	if (usbreq->length &&
+	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
+		if (!((unsigned long)(usbreq->buf) & 0x03)) {
+			if (ep->in)
+				usbreq->dma = dma_map_single(&dev->pdev->dev,
+							     usbreq->buf,
+							     usbreq->length,
+							     DMA_TO_DEVICE);
+			else
+				usbreq->dma = dma_map_single(&dev->pdev->dev,
+							     usbreq->buf,
+							     usbreq->length,
+							     DMA_FROM_DEVICE);
+		} else {
+			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
+			if (!req->buf) {
+				retval = -ENOMEM;
+				goto probe_end;
+			}
+			if (ep->in) {
+				memcpy(req->buf, usbreq->buf, usbreq->length);
+				req->dma = dma_map_single(&dev->pdev->dev,
+							  req->buf,
+							  usbreq->length,
+							  DMA_TO_DEVICE);
+			} else
+				req->dma = dma_map_single(&dev->pdev->dev,
+							  req->buf,
+							  usbreq->length,
+							  DMA_FROM_DEVICE);
+		}
+		req->dma_mapped = 1;
+	}
+	if (usbreq->length > 0) {
+		retval = prepare_dma(ep, req, GFP_ATOMIC);
+		if (retval)
+			goto probe_end;
+	}
+	usbreq->actual = 0;
+	usbreq->status = -EINPROGRESS;
+	req->dma_done = 0;
+	if (list_empty(&ep->queue) && !ep->halted) {
+		/* no pending transfer, so start this req */
+		if (!usbreq->length) {
+			process_zlp(ep, req);
+			retval = 0;
+			goto probe_end;
+		}
+		if (!ep->in) {
+			pch_udc_start_rxrequest(ep, req);
+		} else {
+			/*
+			 * For IN transfers the descriptors will be programmed
+			 * and the P bit will be set when we get an IN token.
+			 */
+			pch_udc_wait_ep_stall(ep);
+			pch_udc_ep_clear_nak(ep);
+			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
+		}
+	}
+	/* Now add this request to the ep's pending requests */
+	if (req != NULL)
+		list_add_tail(&req->queue, &ep->queue);
+
+probe_end:
+	spin_unlock_irqrestore(&dev->lock, iflags);
+	return retval;
+}
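+
+/*
+ * Note: buffers that are not 4-byte aligned are not handed to the DMA
+ * engine directly; pch_udc_pcd_queue() copies them into a kzalloc'd bounce
+ * buffer (req->buf) mapped at req->dma, and complete_req() later frees the
+ * bounce buffer and, for OUT transfers, copies the data back first.
+ */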
+
+/**
+ * pch_udc_pcd_dequeue() - This function de-queues a request packet.
+ *				It is called by gadget driver
+ * @usbep:	Reference to the USB endpoint structure
+ * @usbreq:	Reference to the USB request
+ *
+ * Return codes:
+ *	0:			Success
+ *	linux error number:	Failure
+ */
+static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
+				struct usb_request *usbreq)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_request	*req;
+	struct pch_udc_dev	*dev;
+	unsigned long		flags;
+	int ret = -EINVAL;
+
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	if (!usbep || !usbreq || (!ep->desc && ep->num))
+		return ret;
+	req = container_of(usbreq, struct pch_udc_request, req);
+	spin_lock_irqsave(&ep->dev->lock, flags);
+	/* make sure it's still queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == usbreq) {
+			pch_udc_ep_set_nak(ep);
+			if (!list_empty(&req->queue))
+				complete_req(ep, req, -ECONNRESET);
+			ret = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ep->dev->lock, flags);
+	return ret;
+}
+
+/**
+ * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
+ *			    feature
+ * @usbep:	Reference to the USB endpoint structure
+ * @halt:	Specifies whether to set or clear the feature
+ *
+ * Return codes:
+ *	0:			Success
+ *	linux error number:	Failure
+ */
+static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_dev	*dev;
+	unsigned long iflags;
+	int ret;
+
+	if (!usbep)
+		return -EINVAL;
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	if (!ep->desc && !ep->num)
+		return -EINVAL;
+	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+	spin_lock_irqsave(&udc_stall_spinlock, iflags);
+	if (list_empty(&ep->queue)) {
+		if (halt) {
+			if (ep->num == PCH_UDC_EP0)
+				ep->dev->stall = 1;
+			pch_udc_ep_set_stall(ep);
+			pch_udc_enable_ep_interrupts(ep->dev,
+						     PCH_UDC_EPINT(ep->in,
+								   ep->num));
+		} else {
+			pch_udc_ep_clear_stall(ep);
+		}
+		ret = 0;
+	} else {
+		ret = -EAGAIN;
+	}
+	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
+	return ret;
+}
+
+/**
+ * pch_udc_pcd_set_wedge() - This function sets the endpoint wedge
+ *				(a halt that only the host may clear)
+ * @usbep:	Reference to the USB endpoint structure
+ *
+ * Return codes:
+ *	0:			Success
+ *	linux error number:	Failure
+ */
+static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_dev	*dev;
+	unsigned long iflags;
+	int ret;
+
+	if (!usbep)
+		return -EINVAL;
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	if (!ep->desc && !ep->num)
+		return -EINVAL;
+	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+	spin_lock_irqsave(&udc_stall_spinlock, iflags);
+	if (!list_empty(&ep->queue)) {
+		ret = -EAGAIN;
+	} else {
+		if (ep->num == PCH_UDC_EP0)
+			ep->dev->stall = 1;
+		pch_udc_ep_set_stall(ep);
+		pch_udc_enable_ep_interrupts(ep->dev,
+					     PCH_UDC_EPINT(ep->in, ep->num));
+		ep->dev->prot_stall = 1;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
+	return ret;
+}
+
+/**
+ * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the
+ *			      specified endpoint
+ * @usbep:	Reference to the USB endpoint structure
+ */
+static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
+{
+	struct pch_udc_ep  *ep;
+
+	if (!usbep)
+		return;
+
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	if (ep->desc || !ep->num)
+		pch_udc_ep_fifo_flush(ep, ep->in);
+}
+
+static const struct usb_ep_ops pch_udc_ep_ops = {
+	.enable		= pch_udc_pcd_ep_enable,
+	.disable	= pch_udc_pcd_ep_disable,
+	.alloc_request	= pch_udc_alloc_request,
+	.free_request	= pch_udc_free_request,
+	.queue		= pch_udc_pcd_queue,
+	.dequeue	= pch_udc_pcd_dequeue,
+	.set_halt	= pch_udc_pcd_set_halt,
+	.set_wedge	= pch_udc_pcd_set_wedge,
+	.fifo_status	= NULL,
+	.fifo_flush	= pch_udc_pcd_fifo_flush,
+};
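+
+/*
+ * Usage sketch (an illustration, not code from this driver): a gadget
+ * function driver never calls the routines above directly; the gadget
+ * core dispatches the generic usb_ep_* helpers through this ops table,
+ * roughly as follows (my_complete is a hypothetical completion callback
+ * and ep->desc is assumed to be set before enabling the endpoint):
+ *
+ *	struct usb_request *req;
+ *
+ *	usb_ep_enable(ep);                          // -> pch_udc_pcd_ep_enable()
+ *	req = usb_ep_alloc_request(ep, GFP_KERNEL); // -> pch_udc_alloc_request()
+ *	req->buf      = buf;
+ *	req->length   = len;
+ *	req->complete = my_complete;
+ *	usb_ep_queue(ep, req, GFP_ATOMIC);          // -> pch_udc_pcd_queue()
+ */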
+
+/**
+ * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
+ * @td_stp:	Reference to the SETUP buffer structure
+ */
+static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
+{
+	static u32	pky_marker;
+
+	if (!td_stp)
+		return;
+	td_stp->reserved = ++pky_marker;
+	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
+	td_stp->status = PCH_UDC_BS_HST_RDY;
+}
+
+/**
+ * pch_udc_start_next_txrequest() - This function starts
+ *					the next transmit (IN) request
+ * @ep:	Reference to the endpoint structure
+ */
+static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
+{
+	struct pch_udc_request *req;
+	struct pch_udc_data_dma_desc *td_data;
+
+	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
+		return;
+
+	if (list_empty(&ep->queue))
+		return;
+
+	/* next request */
+	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+	if (req->dma_going)
+		return;
+	if (!req->td_data)
+		return;
+	pch_udc_wait_ep_stall(ep);
+	req->dma_going = 1;
+	pch_udc_ep_set_ddptr(ep, 0);
+	td_data = req->td_data;
+	while (1) {
+		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
+				   PCH_UDC_BS_HST_RDY;
+		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
+			break;
+		td_data = phys_to_virt(td_data->next);
+	}
+	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
+	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
+	pch_udc_ep_set_pd(ep);
+	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+	pch_udc_ep_clear_nak(ep);
+}
+
+/**
+ * pch_udc_complete_transfer() - This function completes a transfer
+ * @ep:		Reference to the endpoint structure
+ */
+static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
+{
+	struct pch_udc_request *req;
+	struct pch_udc_dev *dev = ep->dev;
+
+	if (list_empty(&ep->queue))
+		return;
+	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
+	    PCH_UDC_BS_DMA_DONE)
+		return;
+	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
+	     PCH_UDC_RTS_SUCC) {
+		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
+			"epstatus=0x%08x\n",
+		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
+		       (int)(ep->epsts));
+		return;
+	}
+
+	req->req.actual = req->req.length;
+	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
+	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
+	complete_req(ep, req, 0);
+	req->dma_going = 0;
+	if (!list_empty(&ep->queue)) {
+		pch_udc_wait_ep_stall(ep);
+		pch_udc_ep_clear_nak(ep);
+		pch_udc_enable_ep_interrupts(ep->dev,
+					     PCH_UDC_EPINT(ep->in, ep->num));
+	} else {
+		pch_udc_disable_ep_interrupts(ep->dev,
+					      PCH_UDC_EPINT(ep->in, ep->num));
+	}
+}
+
+/**
+ * pch_udc_complete_receiver() - This function completes a receive (OUT)
+ *				  transfer
+ * @ep:		Reference to the endpoint structure
+ */
+static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
+{
+	struct pch_udc_request *req;
+	struct pch_udc_dev *dev = ep->dev;
+	unsigned int count;
+	struct pch_udc_data_dma_desc *td;
+	dma_addr_t addr;
+
+	if (list_empty(&ep->queue))
+		return;
+	/* next request */
+	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
+	pch_udc_ep_set_ddptr(ep, 0);
+	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
+	    PCH_UDC_BS_DMA_DONE)
+		td = req->td_data_last;
+	else
+		td = req->td_data;
+
+	while (1) {
+		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
+			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
+				"epstatus=0x%08x\n",
+				(td->status & PCH_UDC_RXTX_STS),
+				(int)(ep->epsts));
+			return;
+		}
+		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
+			if (td->status & PCH_UDC_DMA_LAST) {
+				count = td->status & PCH_UDC_RXTX_BYTES;
+				break;
+			}
+		if (td == req->td_data_last) {
+			dev_err(&dev->pdev->dev, "Not complete RX descriptor");
+			return;
+		}
+		addr = (dma_addr_t)td->next;
+		td = phys_to_virt(addr);
+	}
+	/* on 64k packets the RXBYTES field is zero */
+	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
+		count = UDC_DMA_MAXPACKET;
+	req->td_data->status |= PCH_UDC_DMA_LAST;
+	td->status |= PCH_UDC_BS_HST_BSY;
+
+	req->dma_going = 0;
+	req->req.actual = count;
+	complete_req(ep, req, 0);
+	/* If there are new/failed requests, try them now */
+	if (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+		pch_udc_start_rxrequest(ep, req);
+	}
+}
+
+/**
+ * pch_udc_svc_data_in() - This function processes endpoint interrupts
+ *				for IN endpoints
+ * @dev:	Reference to the device structure
+ * @ep_num:	Endpoint that generated the interrupt
+ */
+static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
+{
+	u32	epsts;
+	struct pch_udc_ep	*ep;
+
+	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
+	epsts = ep->epsts;
+	ep->epsts = 0;
+
+	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA  | UDC_EPSTS_HE |
+		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
+		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
+		return;
+	if ((epsts & UDC_EPSTS_BNA))
+		return;
+	if (epsts & UDC_EPSTS_HE)
+		return;
+	if (epsts & UDC_EPSTS_RSS) {
+		pch_udc_ep_set_stall(ep);
+		pch_udc_enable_ep_interrupts(ep->dev,
+					     PCH_UDC_EPINT(ep->in, ep->num));
+	}
+	if (epsts & UDC_EPSTS_RCS) {
+		if (!dev->prot_stall) {
+			pch_udc_ep_clear_stall(ep);
+		} else {
+			pch_udc_ep_set_stall(ep);
+			pch_udc_enable_ep_interrupts(ep->dev,
+						PCH_UDC_EPINT(ep->in, ep->num));
+		}
+	}
+	if (epsts & UDC_EPSTS_TDC)
+		pch_udc_complete_transfer(ep);
+	/* On IN interrupt, provide data if we have any */
+	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
+	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
+		pch_udc_start_next_txrequest(ep);
+}
+
+/**
+ * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
+ * @dev:	Reference to the device structure
+ * @ep_num:	Endpoint that generated the interrupt
+ */
+static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
+{
+	u32			epsts;
+	struct pch_udc_ep		*ep;
+	struct pch_udc_request		*req = NULL;
+
+	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
+	epsts = ep->epsts;
+	ep->epsts = 0;
+
+	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
+		/* next request */
+		req = list_entry(ep->queue.next, struct pch_udc_request,
+				 queue);
+		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
+		     PCH_UDC_BS_DMA_DONE) {
+			if (!req->dma_going)
+				pch_udc_start_rxrequest(ep, req);
+			return;
+		}
+	}
+	if (epsts & UDC_EPSTS_HE)
+		return;
+	if (epsts & UDC_EPSTS_RSS) {
+		pch_udc_ep_set_stall(ep);
+		pch_udc_enable_ep_interrupts(ep->dev,
+					     PCH_UDC_EPINT(ep->in, ep->num));
+	}
+	if (epsts & UDC_EPSTS_RCS) {
+		if (!dev->prot_stall) {
+			pch_udc_ep_clear_stall(ep);
+		} else {
+			pch_udc_ep_set_stall(ep);
+			pch_udc_enable_ep_interrupts(ep->dev,
+						PCH_UDC_EPINT(ep->in, ep->num));
+		}
+	}
+	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
+	    UDC_EPSTS_OUT_DATA) {
+		if (ep->dev->prot_stall == 1) {
+			pch_udc_ep_set_stall(ep);
+			pch_udc_enable_ep_interrupts(ep->dev,
+						PCH_UDC_EPINT(ep->in, ep->num));
+		} else {
+			pch_udc_complete_receiver(ep);
+		}
+	}
+	if (list_empty(&ep->queue))
+		pch_udc_set_dma(dev, DMA_DIR_RX);
+}
+
+/**
+ * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
+ * @dev:	Reference to the device structure
+ */
+static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
+{
+	u32	epsts;
+	struct pch_udc_ep	*ep;
+	struct pch_udc_ep	*ep_out;
+
+	ep = &dev->ep[UDC_EP0IN_IDX];
+	ep_out = &dev->ep[UDC_EP0OUT_IDX];
+	epsts = ep->epsts;
+	ep->epsts = 0;
+
+	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
+		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
+		       UDC_EPSTS_XFERDONE)))
+		return;
+	if ((epsts & UDC_EPSTS_BNA))
+		return;
+	if (epsts & UDC_EPSTS_HE)
+		return;
+	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
+		pch_udc_complete_transfer(ep);
+		pch_udc_clear_dma(dev, DMA_DIR_RX);
+		ep_out->td_data->status = (ep_out->td_data->status &
+					~PCH_UDC_BUFF_STS) |
+					PCH_UDC_BS_HST_RDY;
+		pch_udc_ep_clear_nak(ep_out);
+		pch_udc_set_dma(dev, DMA_DIR_RX);
+		pch_udc_ep_set_rrdy(ep_out);
+	}
+	/* On IN interrupt, provide data if we have any */
+	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
+	     !(epsts & UDC_EPSTS_TXEMPTY))
+		pch_udc_start_next_txrequest(ep);
+}
+
+/**
+ * pch_udc_svc_control_out() - Routine that handles Control
+ *					OUT endpoint interrupts
+ * @dev:	Reference to the device structure
+ */
+static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
+{
+	u32	stat;
+	int setup_supported;
+	struct pch_udc_ep	*ep;
+
+	ep = &dev->ep[UDC_EP0OUT_IDX];
+	stat = ep->epsts;
+	ep->epsts = 0;
+
+	/* If setup data */
+	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
+	    UDC_EPSTS_OUT_SETUP) {
+		dev->stall = 0;
+		dev->ep[UDC_EP0IN_IDX].halted = 0;
+		dev->ep[UDC_EP0OUT_IDX].halted = 0;
+		dev->setup_data = ep->td_stp->request;
+		pch_udc_init_setup_buff(ep->td_stp);
+		pch_udc_clear_dma(dev, DMA_DIR_RX);
+		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
+				      dev->ep[UDC_EP0IN_IDX].in);
+		if ((dev->setup_data.bRequestType & USB_DIR_IN))
+			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
+		else /* OUT */
+			dev->gadget.ep0 = &ep->ep;
+		spin_unlock(&dev->lock);
+		/* If Mass storage Reset */
+		if ((dev->setup_data.bRequestType == 0x21) &&
+		    (dev->setup_data.bRequest == 0xFF))
+			dev->prot_stall = 0;
+		/* call gadget with setup data received */
+		setup_supported = dev->driver->setup(&dev->gadget,
+						     &dev->setup_data);
+		spin_lock(&dev->lock);
+
+		if (dev->setup_data.bRequestType & USB_DIR_IN) {
+			ep->td_data->status = (ep->td_data->status &
+						~PCH_UDC_BUFF_STS) |
+						PCH_UDC_BS_HST_RDY;
+			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
+		}
+		/* ep0 in returns data on IN phase */
+		if (setup_supported >= 0 && setup_supported <
+					    UDC_EP0IN_MAX_PKT_SIZE) {
+			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
+			/* Gadget would have queued a request when
+			 * we called the setup */
+			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
+				pch_udc_set_dma(dev, DMA_DIR_RX);
+				pch_udc_ep_clear_nak(ep);
+			}
+		} else if (setup_supported < 0) {
+			/* if unsupported request, then stall */
+			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
+			pch_udc_enable_ep_interrupts(ep->dev,
+						PCH_UDC_EPINT(ep->in, ep->num));
+			dev->stall = 0;
+			pch_udc_set_dma(dev, DMA_DIR_RX);
+		} else {
+			dev->waiting_zlp_ack = 1;
+		}
+	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
+		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
+		pch_udc_clear_dma(dev, DMA_DIR_RX);
+		pch_udc_ep_set_ddptr(ep, 0);
+		if (!list_empty(&ep->queue)) {
+			ep->epsts = stat;
+			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
+		}
+		pch_udc_set_dma(dev, DMA_DIR_RX);
+	}
+	pch_udc_ep_set_rrdy(ep);
+}
+
+
+/**
+ * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
+ *				and clears NAK status
+ * @dev:	Reference to the device structure
+ * @ep_num:	End point number
+ */
+static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_request *req;
+
+	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
+	if (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+		pch_udc_enable_ep_interrupts(ep->dev,
+					     PCH_UDC_EPINT(ep->in, ep->num));
+		pch_udc_ep_clear_nak(ep);
+	}
+}
+
+/**
+ * pch_udc_read_all_epstatus() - This function reads the status of all endpoints
+ * @dev:	Reference to the device structure
+ * @ep_intr:	Status of endpoint interrupt
+ */
+static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
+{
+	int i;
+	struct pch_udc_ep	*ep;
+
+	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
+		/* IN */
+		if (ep_intr & (0x1 << i)) {
+			ep = &dev->ep[UDC_EPIN_IDX(i)];
+			ep->epsts = pch_udc_read_ep_status(ep);
+			pch_udc_clear_ep_status(ep, ep->epsts);
+		}
+		/* OUT */
+		if (ep_intr & (0x10000 << i)) {
+			ep = &dev->ep[UDC_EPOUT_IDX(i)];
+			ep->epsts = pch_udc_read_ep_status(ep);
+			pch_udc_clear_ep_status(ep, ep->epsts);
+		}
+	}
+}
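+
+/*
+ * Sketch of the endpoint interrupt bit layout used above (an
+ * illustration, not part of the original driver): bit n of the endpoint
+ * interrupt register flags IN endpoint n and bit (16 + n) flags OUT
+ * endpoint n, which is why the loop tests (0x1 << i) and (0x10000 << i).
+ */
+static inline u32 pch_udc_epint_in_bit(int ep_num)
+{
+	return 0x1 << ep_num;		/* IN endpoints use bits 0..15 */
+}
+
+static inline u32 pch_udc_epint_out_bit(int ep_num)
+{
+	return 0x10000 << ep_num;	/* OUT endpoints use bits 16..31 */
+}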
+
+/**
+ * pch_udc_activate_control_ep() - This function enables the control endpoints
+ *					for traffic after a reset
+ * @dev:	Reference to the device structure
+ */
+static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
+{
+	struct pch_udc_ep	*ep;
+	u32 val;
+
+	/* Setup the IN endpoint */
+	ep = &dev->ep[UDC_EP0IN_IDX];
+	pch_udc_clear_ep_control(ep);
+	pch_udc_ep_fifo_flush(ep, ep->in);
+	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
+	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
+	/* Initialize the IN EP Descriptor */
+	ep->td_data      = NULL;
+	ep->td_stp       = NULL;
+	ep->td_data_phys = 0;
+	ep->td_stp_phys  = 0;
+
+	/* Setup the OUT endpoint */
+	ep = &dev->ep[UDC_EP0OUT_IDX];
+	pch_udc_clear_ep_control(ep);
+	pch_udc_ep_fifo_flush(ep, ep->in);
+	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
+	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
+	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
+	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
+
+	/* Initialize the SETUP buffer */
+	pch_udc_init_setup_buff(ep->td_stp);
+	/* Write the pointer address of dma descriptor */
+	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
+	/* Write the pointer address of Setup descriptor */
+	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
+
+	/* Initialize the dma descriptor */
+	ep->td_data->status  = PCH_UDC_DMA_LAST;
+	ep->td_data->dataptr = dev->dma_addr;
+	ep->td_data->next    = ep->td_data_phys;
+
+	pch_udc_ep_clear_nak(ep);
+}
+
+
+/**
+ * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
+ * @dev:	Reference to driver structure
+ */
+static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
+{
+	struct pch_udc_ep	*ep;
+	int i;
+
+	pch_udc_clear_dma(dev, DMA_DIR_TX);
+	pch_udc_clear_dma(dev, DMA_DIR_RX);
+	/* Mask all endpoint interrupts */
+	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
+	/* clear all endpoint interrupts */
+	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
+
+	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
+		ep = &dev->ep[i];
+		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
+		pch_udc_clear_ep_control(ep);
+		pch_udc_ep_set_ddptr(ep, 0);
+		pch_udc_write_csr(ep->dev, 0x00, i);
+	}
+	dev->stall = 0;
+	dev->prot_stall = 0;
+	dev->waiting_zlp_ack = 0;
+	dev->set_cfg_not_acked = 0;
+
+	/* disable ep to empty req queue. Skip the control EP's */
+	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
+		ep = &dev->ep[i];
+		pch_udc_ep_set_nak(ep);
+		pch_udc_ep_fifo_flush(ep, ep->in);
+		/* Complete request queue */
+		empty_req_queue(ep);
+	}
+	if (dev->driver && dev->driver->disconnect) {
+		spin_unlock(&dev->lock);
+		dev->driver->disconnect(&dev->gadget);
+		spin_lock(&dev->lock);
+	}
+}
+
+/**
+ * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
+ *				done interrupt
+ * @dev:	Reference to driver structure
+ */
+static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
+{
+	u32 dev_stat, dev_speed;
+	u32 speed = USB_SPEED_FULL;
+
+	dev_stat = pch_udc_read_device_status(dev);
+	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
+						 UDC_DEVSTS_ENUM_SPEED_SHIFT;
+	switch (dev_speed) {
+	case UDC_DEVSTS_ENUM_SPEED_HIGH:
+		speed = USB_SPEED_HIGH;
+		break;
+	case  UDC_DEVSTS_ENUM_SPEED_FULL:
+		speed = USB_SPEED_FULL;
+		break;
+	case  UDC_DEVSTS_ENUM_SPEED_LOW:
+		speed = USB_SPEED_LOW;
+		break;
+	default:
+		BUG();
+	}
+	dev->gadget.speed = speed;
+	pch_udc_activate_control_ep(dev);
+	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
+	pch_udc_set_dma(dev, DMA_DIR_TX);
+	pch_udc_set_dma(dev, DMA_DIR_RX);
+	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
+
+	/* enable device interrupts */
+	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
+					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
+					UDC_DEVINT_SI | UDC_DEVINT_SC);
+}
+
+/**
+ * pch_udc_svc_intf_interrupt() - This function handles a set interface
+ *				  interrupt
+ * @dev:	Reference to driver structure
+ */
+static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
+{
+	u32 reg, dev_stat = 0;
+	int i, ret;
+
+	dev_stat = pch_udc_read_device_status(dev);
+	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
+							 UDC_DEVSTS_INTF_SHIFT;
+	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
+							 UDC_DEVSTS_ALT_SHIFT;
+	dev->set_cfg_not_acked = 1;
+	/* Construct the usb request for gadget driver and inform it */
+	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
+	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
+	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
+	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
+	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
+	/* program the Endpoint Cfg registers */
+	/* Only one endpoint cfg register */
+	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
+	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
+	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
+	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
+	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
+	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
+	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
+		/* clear stall bits */
+		pch_udc_ep_clear_stall(&(dev->ep[i]));
+		dev->ep[i].halted = 0;
+	}
+	dev->stall = 0;
+	spin_unlock(&dev->lock);
+	ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
+	spin_lock(&dev->lock);
+}
+
+/**
+ * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
+ *				interrupt
+ * @dev:	Reference to driver structure
+ */
+static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
+{
+	int i, ret;
+	u32 reg, dev_stat = 0;
+
+	dev_stat = pch_udc_read_device_status(dev);
+	dev->set_cfg_not_acked = 1;
+	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
+				UDC_DEVSTS_CFG_SHIFT;
+	/* make usb request for gadget driver */
+	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
+	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
+	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
+	/* program the NE registers */
+	/* Only one endpoint cfg register */
+	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
+	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
+	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
+	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
+	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
+		/* clear stall bits */
+		pch_udc_ep_clear_stall(&(dev->ep[i]));
+		dev->ep[i].halted = 0;
+	}
+	dev->stall = 0;
+
+	/* call gadget zero with setup data received */
+	spin_unlock(&dev->lock);
+	ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
+	spin_lock(&dev->lock);
+}
+
+/**
+ * pch_udc_dev_isr() - This function services device interrupts
+ *			by invoking appropriate routines.
+ * @dev:	Reference to the device structure
+ * @dev_intr:	The Device interrupt status.
+ */
+static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
+{
+	int vbus;
+
+	/* USB Reset Interrupt */
+	if (dev_intr & UDC_DEVINT_UR) {
+		pch_udc_svc_ur_interrupt(dev);
+		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
+	}
+	/* Enumeration Done Interrupt */
+	if (dev_intr & UDC_DEVINT_ENUM) {
+		pch_udc_svc_enum_interrupt(dev);
+		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
+	}
+	/* Set Interface Interrupt */
+	if (dev_intr & UDC_DEVINT_SI)
+		pch_udc_svc_intf_interrupt(dev);
+	/* Set Config Interrupt */
+	if (dev_intr & UDC_DEVINT_SC)
+		pch_udc_svc_cfg_interrupt(dev);
+	/* USB Suspend interrupt */
+	if (dev_intr & UDC_DEVINT_US) {
+		if (dev->driver
+			&& dev->driver->suspend) {
+			spin_unlock(&dev->lock);
+			dev->driver->suspend(&dev->gadget);
+			spin_lock(&dev->lock);
+		}
+
+		vbus = pch_vbus_gpio_get_value(dev);
+		if ((dev->vbus_session == 0)
+			&& (vbus != 1)) {
+			if (dev->driver && dev->driver->disconnect) {
+				spin_unlock(&dev->lock);
+				dev->driver->disconnect(&dev->gadget);
+				spin_lock(&dev->lock);
+			}
+			pch_udc_reconnect(dev);
+		} else if ((dev->vbus_session == 0)
+			&& (vbus == 1)
+			&& !dev->vbus_gpio.intr)
+			schedule_work(&dev->vbus_gpio.irq_work_fall);
+
+		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
+	}
+	/* Clear the SOF interrupt, if enabled */
+	if (dev_intr & UDC_DEVINT_SOF)
+		dev_dbg(&dev->pdev->dev, "SOF\n");
+	/* ES interrupt, IDLE > 3ms on the USB */
+	if (dev_intr & UDC_DEVINT_ES)
+		dev_dbg(&dev->pdev->dev, "ES\n");
+	/* RWKP interrupt */
+	if (dev_intr & UDC_DEVINT_RWKP)
+		dev_dbg(&dev->pdev->dev, "RWKP\n");
+}
+
+/**
+ * pch_udc_isr() - This function handles interrupts from the PCH USB Device
+ * @irq:	Interrupt request number
+ * @pdev:	Reference to the device structure (passed as void *)
+ */
+static irqreturn_t pch_udc_isr(int irq, void *pdev)
+{
+	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
+	u32 dev_intr, ep_intr;
+	int i;
+
+	dev_intr = pch_udc_read_device_interrupts(dev);
+	ep_intr = pch_udc_read_ep_interrupts(dev);
+
+	/* On hot plug, this checks whether the controller is hung up. */
+	if (dev_intr == ep_intr)
+		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
+			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
+			/* The controller is reset */
+			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
+			return IRQ_HANDLED;
+		}
+	if (dev_intr)
+		/* Clear device interrupts */
+		pch_udc_write_device_interrupts(dev, dev_intr);
+	if (ep_intr)
+		/* Clear ep interrupts */
+		pch_udc_write_ep_interrupts(dev, ep_intr);
+	if (!dev_intr && !ep_intr)
+		return IRQ_NONE;
+	spin_lock(&dev->lock);
+	if (dev_intr)
+		pch_udc_dev_isr(dev, dev_intr);
+	if (ep_intr) {
+		pch_udc_read_all_epstatus(dev, ep_intr);
+		/* Process Control In interrupts, if present */
+		if (ep_intr & UDC_EPINT_IN_EP0) {
+			pch_udc_svc_control_in(dev);
+			pch_udc_postsvc_epinters(dev, 0);
+		}
+		/* Process Control Out interrupts, if present */
+		if (ep_intr & UDC_EPINT_OUT_EP0)
+			pch_udc_svc_control_out(dev);
+		/* Process data in end point interrupts */
+		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
+			if (ep_intr & (1 <<  i)) {
+				pch_udc_svc_data_in(dev, i);
+				pch_udc_postsvc_epinters(dev, i);
+			}
+		}
+		/* Process data out end point interrupts */
+		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
+						 PCH_UDC_USED_EP_NUM); i++)
+			if (ep_intr & (1 <<  i))
+				pch_udc_svc_data_out(dev, i -
+							 UDC_EPINT_OUT_SHIFT);
+	}
+	spin_unlock(&dev->lock);
+	return IRQ_HANDLED;
+}
+
+/**
+ * pch_udc_setup_ep0() - This function enables control endpoint for traffic
+ * @dev:	Reference to the device structure
+ */
+static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
+{
+	/* enable ep0 interrupts */
+	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
+						UDC_EPINT_OUT_EP0);
+	/* enable device interrupts */
+	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
+				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
+				       UDC_DEVINT_SI | UDC_DEVINT_SC);
+}
+
+/**
+ * gadget_release() - Free the gadget driver private data
+ * @pdev:	reference to the gadget's struct device
+ */
+static void gadget_release(struct device *pdev)
+{
+	struct pch_udc_dev *dev = dev_get_drvdata(pdev);
+
+	kfree(dev);
+}
+
+/**
+ * pch_udc_pcd_reinit() - This API initializes the endpoint structures
+ * @dev:	Reference to the driver structure
+ */
+static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
+{
+	const char *const ep_string[] = {
+		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
+		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
+		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
+		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
+		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
+		"ep15in", "ep15out",
+	};
+	int i;
+
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+	INIT_LIST_HEAD(&dev->gadget.ep_list);
+
+	/* Initialize the endpoints structures */
+	memset(dev->ep, 0, sizeof dev->ep);
+	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
+		struct pch_udc_ep *ep = &dev->ep[i];
+		ep->dev = dev;
+		ep->halted = 1;
+		ep->num = i / 2;
+		ep->in = ~i & 1;
+		ep->ep.name = ep_string[i];
+		ep->ep.ops = &pch_udc_ep_ops;
+		if (ep->in)
+			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
+		else
+			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
+					  UDC_EP_REG_SHIFT;
+		/* need to set ep->ep.maxpacket and set Default Configuration?*/
+		ep->ep.maxpacket = UDC_BULK_MAX_PKT_SIZE;
+		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+		INIT_LIST_HEAD(&ep->queue);
+	}
+	dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
+	dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
+
+	/* remove ep0 in and out from the list.  They have their own pointers */
+	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
+	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
+
+	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
+	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+}
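+
+/*
+ * Illustration only (not used by the driver): pch_udc_pcd_reinit() lays
+ * the endpoints out so that even indices are IN and odd indices are OUT,
+ * with the physical endpoint number being the index divided by two
+ * (e.g. ep1in sits at index 2 and ep1out at index 3 in ep_string[]).
+ */
+static inline int pch_udc_ep_index(int ep_num, int is_in)
+{
+	return ep_num * 2 + (is_in ? 0 : 1);
+}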
+
+/**
+ * pch_udc_pcd_init() - This API initializes the driver structure
+ * @dev:	Reference to the driver structure
+ *
+ * Return codes:
+ *	0: Success
+ */
+static int pch_udc_pcd_init(struct pch_udc_dev *dev)
+{
+	pch_udc_init(dev);
+	pch_udc_pcd_reinit(dev);
+	pch_vbus_gpio_init(dev, vbus_gpio_port);
+	return 0;
+}
+
+/**
+ * init_dma_pools() - create dma pools during initialization
+ * @dev:	reference to the driver structure
+ */
+static int init_dma_pools(struct pch_udc_dev *dev)
+{
+	struct pch_udc_stp_dma_desc	*td_stp;
+	struct pch_udc_data_dma_desc	*td_data;
+
+	/* DMA setup */
+	dev->data_requests = pci_pool_create("data_requests", dev->pdev,
+		sizeof(struct pch_udc_data_dma_desc), 0, 0);
+	if (!dev->data_requests) {
+		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	/* dma desc for setup data */
+	dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
+		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
+	if (!dev->stp_requests) {
+		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
+			__func__);
+		return -ENOMEM;
+	}
+	/* setup */
+	td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
+				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
+	if (!td_stp) {
+		dev_err(&dev->pdev->dev,
+			"%s: can't allocate setup dma descriptor\n", __func__);
+		return -ENOMEM;
+	}
+	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
+
+	/* data: 0 packets !? */
+	td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
+				&dev->ep[UDC_EP0OUT_IDX].td_data_phys);
+	if (!td_data) {
+		dev_err(&dev->pdev->dev,
+			"%s: can't allocate data dma descriptor\n", __func__);
+		return -ENOMEM;
+	}
+	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
+	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
+	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
+	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
+	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
+
+	dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
+	if (!dev->ep0out_buf)
+		return -ENOMEM;
+	dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
+				       UDC_EP0OUT_BUFF_SIZE * 4,
+				       DMA_FROM_DEVICE);
+	return 0;
+}
+
+static int pch_udc_start(struct usb_gadget_driver *driver,
+	int (*bind)(struct usb_gadget *))
+{
+	struct pch_udc_dev	*dev = pch_udc;
+	int			retval;
+
+	if (!dev)
+		return -ENODEV;
+
+	if (!driver || (driver->max_speed == USB_SPEED_UNKNOWN) || !bind ||
+	    !driver->setup || !driver->unbind || !driver->disconnect) {
+		dev_err(&dev->pdev->dev,
+			"%s: invalid driver parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dev->driver) {
+		dev_err(&dev->pdev->dev, "%s: already bound\n", __func__);
+		return -EBUSY;
+	}
+	driver->driver.bus = NULL;
+	dev->driver = driver;
+	dev->gadget.dev.driver = &driver->driver;
+
+	/* Invoke the bind routine of the gadget driver */
+	retval = bind(&dev->gadget);
+
+	if (retval) {
+		dev_err(&dev->pdev->dev, "%s: binding to %s returning %d\n",
+		       __func__, driver->driver.name, retval);
+		dev->driver = NULL;
+		dev->gadget.dev.driver = NULL;
+		return retval;
+	}
+	/* get ready for ep0 traffic */
+	pch_udc_setup_ep0(dev);
+
+	/* clear SD */
+	if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
+		pch_udc_clear_disconnect(dev);
+
+	dev->connected = 1;
+	return 0;
+}
+
+static int pch_udc_stop(struct usb_gadget_driver *driver)
+{
+	struct pch_udc_dev	*dev = pch_udc;
+
+	if (!dev)
+		return -ENODEV;
+
+	if (!driver || (driver != dev->driver)) {
+		dev_err(&dev->pdev->dev,
+			"%s: invalid driver parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
+
+	/* Assures that there are no pending requests with this driver */
+	driver->disconnect(&dev->gadget);
+	driver->unbind(&dev->gadget);
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+	dev->connected = 0;
+
+	/* set SD */
+	pch_udc_set_disconnect(dev);
+	return 0;
+}
+
+static void pch_udc_shutdown(struct pci_dev *pdev)
+{
+	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
+
+	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
+	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
+
+	/* disable the pullup so the host will think we're gone */
+	pch_udc_set_disconnect(dev);
+}
+
+static void pch_udc_remove(struct pci_dev *pdev)
+{
+	struct pch_udc_dev	*dev = pci_get_drvdata(pdev);
+
+	usb_del_gadget_udc(&dev->gadget);
+
+	/* gadget driver must not be registered */
+	if (dev->driver)
+		dev_err(&pdev->dev,
+			"%s: gadget driver still bound!!!\n", __func__);
+	/* dma pool cleanup */
+	if (dev->data_requests)
+		pci_pool_destroy(dev->data_requests);
+
+	if (dev->stp_requests) {
+		/* cleanup DMA desc's for ep0in */
+		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
+			pci_pool_free(dev->stp_requests,
+				dev->ep[UDC_EP0OUT_IDX].td_stp,
+				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
+		}
+		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
+			pci_pool_free(dev->stp_requests,
+				dev->ep[UDC_EP0OUT_IDX].td_data,
+				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
+		}
+		pci_pool_destroy(dev->stp_requests);
+	}
+
+	if (dev->dma_addr)
+		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
+				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
+	kfree(dev->ep0out_buf);
+
+	pch_vbus_gpio_free(dev);
+
+	pch_udc_exit(dev);
+
+	if (dev->irq_registered)
+		free_irq(pdev->irq, dev);
+	if (dev->base_addr)
+		iounmap(dev->base_addr);
+	if (dev->mem_region)
+		release_mem_region(dev->phys_addr,
+				   pci_resource_len(pdev, PCH_UDC_PCI_BAR));
+	if (dev->active)
+		pci_disable_device(pdev);
+	if (dev->registered)
+		device_unregister(&dev->gadget.dev);
+	kfree(dev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+static int pch_udc_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
+
+	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
+	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
+
+	pci_disable_device(pdev);
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+
+	if (pci_save_state(pdev)) {
+		dev_err(&pdev->dev,
+			"%s: could not save PCI config state\n", __func__);
+		return -ENOMEM;
+	}
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
+}
+
+static int pch_udc_resume(struct pci_dev *pdev)
+{
+	int ret;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
+		return ret;
+	}
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	return 0;
+}
+#else
+#define pch_udc_suspend	NULL
+#define pch_udc_resume	NULL
+#endif /* CONFIG_PM */
+
+static int pch_udc_probe(struct pci_dev *pdev,
+			  const struct pci_device_id *id)
+{
+	unsigned long		resource;
+	unsigned long		len;
+	int			retval;
+	struct pch_udc_dev	*dev;
+
+	/* one udc only */
+	if (pch_udc) {
+		pr_err("%s: already probed\n", __func__);
+		return -EBUSY;
+	}
+	/* init */
+	dev = kzalloc(sizeof *dev, GFP_KERNEL);
+	if (!dev) {
+		pr_err("%s: no memory for device structure\n", __func__);
+		return -ENOMEM;
+	}
+	/* pci setup */
+	if (pci_enable_device(pdev) < 0) {
+		kfree(dev);
+		pr_err("%s: pci_enable_device failed\n", __func__);
+		return -ENODEV;
+	}
+	dev->active = 1;
+	pci_set_drvdata(pdev, dev);
+
+	/* PCI resource allocation */
+	resource = pci_resource_start(pdev, 1);
+	len = pci_resource_len(pdev, 1);
+
+	if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
+		dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
+		retval = -EBUSY;
+		goto finished;
+	}
+	dev->phys_addr = resource;
+	dev->mem_region = 1;
+
+	dev->base_addr = ioremap_nocache(resource, len);
+	if (!dev->base_addr) {
+		pr_err("%s: device memory cannot be mapped\n", __func__);
+		retval = -ENOMEM;
+		goto finished;
+	}
+	if (!pdev->irq) {
+		dev_err(&pdev->dev, "%s: irq not set\n", __func__);
+		retval = -ENODEV;
+		goto finished;
+	}
+	pch_udc = dev;
+	/* initialize the hardware */
+	if (pch_udc_pcd_init(dev)) {
+		retval = -ENODEV;
+		goto finished;
+	}
+	if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
+			dev)) {
+		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
+			pdev->irq);
+		retval = -ENODEV;
+		goto finished;
+	}
+	dev->irq = pdev->irq;
+	dev->irq_registered = 1;
+
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+
+	/* device struct setup */
+	spin_lock_init(&dev->lock);
+	dev->pdev = pdev;
+	dev->gadget.ops = &pch_udc_ops;
+
+	retval = init_dma_pools(dev);
+	if (retval)
+		goto finished;
+
+	dev_set_name(&dev->gadget.dev, "gadget");
+	dev->gadget.dev.parent = &pdev->dev;
+	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+	dev->gadget.dev.release = gadget_release;
+	dev->gadget.name = KBUILD_MODNAME;
+	dev->gadget.max_speed = USB_SPEED_HIGH;
+
+	retval = device_register(&dev->gadget.dev);
+	if (retval)
+		goto finished;
+	dev->registered = 1;
+
+	/* Put the device in disconnected state till a driver is bound */
+	pch_udc_set_disconnect(dev);
+	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
+	if (retval)
+		goto finished;
+	return 0;
+
+finished:
+	pch_udc_remove(pdev);
+	return retval;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
+		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+		.class_mask = 0xffffffff,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
+		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+		.class_mask = 0xffffffff,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
+		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+		.class_mask = 0xffffffff,
+	},
+	{ 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
+
+
+static struct pci_driver pch_udc_driver = {
+	.name =	KBUILD_MODNAME,
+	.id_table =	pch_udc_pcidev_id,
+	.probe =	pch_udc_probe,
+	.remove =	pch_udc_remove,
+	.suspend =	pch_udc_suspend,
+	.resume =	pch_udc_resume,
+	.shutdown =	pch_udc_shutdown,
+};
+
+static int __init pch_udc_pci_init(void)
+{
+	return pci_register_driver(&pch_udc_driver);
+}
+module_init(pch_udc_pci_init);
+
+static void __exit pch_udc_pci_exit(void)
+{
+	pci_unregister_driver(&pch_udc_driver);
+}
+module_exit(pch_udc_pci_exit);
+
+MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
+MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/printer.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/printer.c
new file mode 100644
index 0000000..4e4dc1f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/printer.c
@@ -0,0 +1,1600 @@
+/*
+ * printer.c -- Printer gadget driver
+ *
+ * Copyright (C) 2003-2005 David Brownell
+ * Copyright (C) 2006 Craig W. Nadler
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/cdev.h>
+
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/g_printer.h>
+
+#include "gadget_chips.h"
+
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+/*-------------------------------------------------------------------------*/
+
+#define DRIVER_DESC		"Printer Gadget"
+#define DRIVER_VERSION		"2007 OCT 06"
+
+static DEFINE_MUTEX(printer_mutex);
+static const char shortname [] = "printer";
+static const char driver_desc [] = DRIVER_DESC;
+
+static dev_t g_printer_devno;
+
+static struct class *usb_gadget_class;
+
+/*-------------------------------------------------------------------------*/
+
+struct printer_dev {
+	spinlock_t		lock;		/* lock this structure */
+	/* lock buffer lists during read/write calls */
+	struct mutex		lock_printer_io;
+	struct usb_gadget	*gadget;
+	struct usb_request	*req;		/* for control responses */
+	u8			config;
+	s8			interface;
+	struct usb_ep		*in_ep, *out_ep;
+
+	struct list_head	rx_reqs;	/* List of free RX structs */
+	struct list_head	rx_reqs_active;	/* List of Active RX xfers */
+	struct list_head	rx_buffers;	/* List of completed xfers */
+	/* wait until there is data to be read. */
+	wait_queue_head_t	rx_wait;
+	struct list_head	tx_reqs;	/* List of free TX structs */
+	struct list_head	tx_reqs_active; /* List of Active TX xfers */
+	/* Wait until there are write buffers available to use. */
+	wait_queue_head_t	tx_wait;
+	/* Wait until all write buffers have been sent. */
+	wait_queue_head_t	tx_flush_wait;
+	struct usb_request	*current_rx_req;
+	size_t			current_rx_bytes;
+	u8			*current_rx_buf;
+	u8			printer_status;
+	u8			reset_printer;
+	struct cdev		printer_cdev;
+	struct device		*pdev;
+	u8			printer_cdev_open;
+	wait_queue_head_t	wait;
+};
+
+static struct printer_dev usb_printer_gadget;
+
+/*-------------------------------------------------------------------------*/
+
+/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+
+/* Thanks to NetChip Technologies for donating this product ID.
+ */
+#define PRINTER_VENDOR_NUM	0x0525		/* NetChip */
+#define PRINTER_PRODUCT_NUM	0xa4a8		/* Linux-USB Printer Gadget */
+
+/* Some systems will want different product identifiers published in the
+ * device descriptor, either numbers or strings or both.  These string
+ * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
+ */
+
+static ushort idVendor;
+module_param(idVendor, ushort, S_IRUGO);
+MODULE_PARM_DESC(idVendor, "USB Vendor ID");
+
+static ushort idProduct;
+module_param(idProduct, ushort, S_IRUGO);
+MODULE_PARM_DESC(idProduct, "USB Product ID");
+
+static ushort bcdDevice;
+module_param(bcdDevice, ushort, S_IRUGO);
+MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
+
+static char *iManufacturer;
+module_param(iManufacturer, charp, S_IRUGO);
+MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
+
+static char *iProduct;
+module_param(iProduct, charp, S_IRUGO);
+MODULE_PARM_DESC(iProduct, "USB Product string");
+
+static char *iSerialNum;
+module_param(iSerialNum, charp, S_IRUGO);
+MODULE_PARM_DESC(iSerialNum, "1");
+
+static char *iPNPstring;
+module_param(iPNPstring, charp, S_IRUGO);
+MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
+
+/* Number of requests to allocate per endpoint, not used for ep0. */
+static unsigned qlen = 10;
+module_param(qlen, uint, S_IRUGO|S_IWUSR);
+
+#define QLEN	qlen
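+
+/*
+ * Loading sketch (the module name g_printer and the exact shell quoting
+ * are assumptions; the defaults shown match the values used elsewhere in
+ * this file):
+ *
+ *	modprobe g_printer idVendor=0x0525 idProduct=0xa4a8 \
+ *		iPNPstring="MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;" qlen=10
+ */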
+
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+#define DEVSPEED	USB_SPEED_HIGH
+#else   /* full speed (low speed doesn't do bulk) */
+#define DEVSPEED        USB_SPEED_FULL
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#define xprintk(d, level, fmt, args...) \
+	printk(level "%s: " fmt, DRIVER_DESC, ## args)
+
+#ifdef DEBUG
+#define DBG(dev, fmt, args...) \
+	xprintk(dev, KERN_DEBUG, fmt, ## args)
+#else
+#define DBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE
+#define VDBG(dev, fmt, args...) \
+	xprintk(dev, KERN_DEBUG, fmt, ## args)
+#else
+#define VDBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* VERBOSE */
+
+#define ERROR(dev, fmt, args...) \
+	xprintk(dev, KERN_ERR, fmt, ## args)
+#define WARNING(dev, fmt, args...) \
+	xprintk(dev, KERN_WARNING, fmt, ## args)
+#define INFO(dev, fmt, args...) \
+	xprintk(dev, KERN_INFO, fmt, ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* USB DRIVER HOOKUP (to the hardware driver, below us), mostly
+ * ep0 implementation:  descriptors, config management, setup().
+ * also optional class-specific notification interrupt transfer.
+ */
+
+/*
+ * DESCRIPTORS ... most are static, but strings and (full) configuration
+ * descriptors are built on demand.
+ */
+
+#define STRING_MANUFACTURER		1
+#define STRING_PRODUCT			2
+#define STRING_SERIALNUM		3
+
+/* holds our biggest descriptor */
+#define USB_DESC_BUFSIZE		256
+#define USB_BUFSIZE			8192
+
+/* This device advertises one configuration. */
+#define DEV_CONFIG_VALUE		1
+#define	PRINTER_INTERFACE		0
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+	.bcdUSB =		cpu_to_le16(0x0200),
+	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
+	.bDeviceSubClass =	0,
+	.bDeviceProtocol =	0,
+	.idVendor =		cpu_to_le16(PRINTER_VENDOR_NUM),
+	.idProduct =		cpu_to_le16(PRINTER_PRODUCT_NUM),
+	.iManufacturer =	STRING_MANUFACTURER,
+	.iProduct =		STRING_PRODUCT,
+	.iSerialNumber =	STRING_SERIALNUM,
+	.bNumConfigurations =	1
+};
+
+static struct usb_otg_descriptor otg_desc = {
+	.bLength =		sizeof otg_desc,
+	.bDescriptorType =	USB_DT_OTG,
+	.bmAttributes =		USB_OTG_SRP
+};
+
+static struct usb_config_descriptor config_desc = {
+	.bLength =		sizeof config_desc,
+	.bDescriptorType =	USB_DT_CONFIG,
+
+	/* compute wTotalLength on the fly */
+	.bNumInterfaces =	1,
+	.bConfigurationValue =	DEV_CONFIG_VALUE,
+	.iConfiguration =	0,
+	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.bMaxPower =		CONFIG_USB_GADGET_VBUS_DRAW / 2,
+};
+
+static struct usb_interface_descriptor intf_desc = {
+	.bLength =		sizeof intf_desc,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bInterfaceNumber =	PRINTER_INTERFACE,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_PRINTER,
+	.bInterfaceSubClass =	1,	/* Printer Sub-Class */
+	.bInterfaceProtocol =	2,	/* Bi-Directional */
+	.iInterface =		0
+};
+
+static struct usb_endpoint_descriptor fs_ep_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK
+};
+
+static struct usb_endpoint_descriptor fs_ep_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK
+};
+
+static const struct usb_descriptor_header *fs_printer_function [11] = {
+	(struct usb_descriptor_header *) &otg_desc,
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &fs_ep_in_desc,
+	(struct usb_descriptor_header *) &fs_ep_out_desc,
+	NULL
+};
+
+#ifdef	CONFIG_USB_GADGET_DUALSPEED
+
+/*
+ * usb 2.0 devices need to expose both high speed and full speed
+ * descriptors, unless they only run at full speed.
+ */
+
+static struct usb_endpoint_descriptor hs_ep_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512)
+};
+
+static struct usb_endpoint_descriptor hs_ep_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512)
+};
+
+static struct usb_qualifier_descriptor dev_qualifier = {
+	.bLength =		sizeof dev_qualifier,
+	.bDescriptorType =	USB_DT_DEVICE_QUALIFIER,
+	.bcdUSB =		cpu_to_le16(0x0200),
+	.bDeviceClass =		USB_CLASS_PRINTER,
+	.bNumConfigurations =	1
+};
+
+static const struct usb_descriptor_header *hs_printer_function [11] = {
+	(struct usb_descriptor_header *) &otg_desc,
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &hs_ep_in_desc,
+	(struct usb_descriptor_header *) &hs_ep_out_desc,
+	NULL
+};
+
+/* maxpacket and other transfer characteristics vary by speed. */
+#define ep_desc(g, hs, fs) (((g)->speed == USB_SPEED_HIGH)?(hs):(fs))
+
+#else
+
+/* if there's no high speed support, maxpacket doesn't change. */
+#define ep_desc(g, hs, fs) (((void)(g)), (fs))
+
+#endif	/* !CONFIG_USB_GADGET_DUALSPEED */
+
+/*-------------------------------------------------------------------------*/
+
+/* descriptors that are built on-demand */
+
+static char				manufacturer [50];
+static char				product_desc [40] = DRIVER_DESC;
+static char				serial_num [40] = "1";
+static char				pnp_string [1024] =
+	"XXMFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;";
+
+/* static strings, in UTF-8 */
+static struct usb_string		strings [] = {
+	{ STRING_MANUFACTURER,	manufacturer, },
+	{ STRING_PRODUCT,	product_desc, },
+	{ STRING_SERIALNUM,	serial_num, },
+	{  }		/* end of list */
+};
+
+static struct usb_gadget_strings	stringtab = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *
+printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags)
+{
+	struct usb_request	*req;
+
+	req = usb_ep_alloc_request(ep, gfp_flags);
+
+	if (req != NULL) {
+		req->length = len;
+		req->buf = kmalloc(len, gfp_flags);
+		if (req->buf == NULL) {
+			usb_ep_free_request(ep, req);
+			return NULL;
+		}
+	}
+
+	return req;
+}
+
+static void
+printer_req_free(struct usb_ep *ep, struct usb_request *req)
+{
+	if (ep != NULL && req != NULL) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
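+
+/*
+ * Sketch (an illustration, not the driver's actual set-up path): the two
+ * helpers above pair a usb_request with its data buffer, so building a
+ * pool of QLEN requests of USB_BUFSIZE bytes each looks roughly like:
+ *
+ *	struct usb_request *req;
+ *	int i;
+ *
+ *	for (i = 0; i < QLEN; i++) {
+ *		req = printer_req_alloc(dev->out_ep, USB_BUFSIZE, GFP_KERNEL);
+ *		if (!req)
+ *			goto fail;	// hypothetical error label
+ *		list_add(&req->list, &dev->rx_reqs);
+ *	}
+ */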
+
+/*-------------------------------------------------------------------------*/
+
+static void rx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct printer_dev	*dev = ep->driver_data;
+	int			status = req->status;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	list_del_init(&req->list);	/* Remove from the active list */
+
+	switch (status) {
+
+	/* normal completion */
+	case 0:
+		if (req->actual > 0) {
+			list_add_tail(&req->list, &dev->rx_buffers);
+			DBG(dev, "G_Printer : rx length %d\n", req->actual);
+		} else {
+			list_add(&req->list, &dev->rx_reqs);
+		}
+		break;
+
+	/* software-driven interface shutdown */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		VDBG(dev, "rx shutdown, code %d\n", status);
+		list_add(&req->list, &dev->rx_reqs);
+		break;
+
+	/* for hardware automagic (such as pxa) */
+	case -ECONNABORTED:		/* endpoint reset */
+		DBG(dev, "rx %s reset\n", ep->name);
+		list_add(&req->list, &dev->rx_reqs);
+		break;
+
+	/* data overrun */
+	case -EOVERFLOW:
+		/* FALLTHROUGH */
+
+	default:
+		DBG(dev, "rx status %d\n", status);
+		list_add(&req->list, &dev->rx_reqs);
+		break;
+	}
+
+	wake_up_interruptible(&dev->rx_wait);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void tx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct printer_dev	*dev = ep->driver_data;
+
+	switch (req->status) {
+	default:
+		VDBG(dev, "tx err %d\n", req->status);
+		/* FALLTHROUGH */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		break;
+	case 0:
+		break;
+	}
+
+	spin_lock(&dev->lock);
+	/* Take the request struct off the active list and put it on the
+	 * free list.
+	 */
+	list_del_init(&req->list);
+	list_add(&req->list, &dev->tx_reqs);
+	wake_up_interruptible(&dev->tx_wait);
+	if (likely(list_empty(&dev->tx_reqs_active)))
+		wake_up_interruptible(&dev->tx_flush_wait);
+
+	spin_unlock(&dev->lock);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+printer_open(struct inode *inode, struct file *fd)
+{
+	struct printer_dev	*dev;
+	unsigned long		flags;
+	int			ret = -EBUSY;
+
+	mutex_lock(&printer_mutex);
+	dev = container_of(inode->i_cdev, struct printer_dev, printer_cdev);
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	if (!dev->printer_cdev_open) {
+		dev->printer_cdev_open = 1;
+		fd->private_data = dev;
+		ret = 0;
+		/* Change the printer status to show that it's on-line. */
+		dev->printer_status |= PRINTER_SELECTED;
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	DBG(dev, "printer_open returned %x\n", ret);
+	mutex_unlock(&printer_mutex);
+	return ret;
+}
+
+static int
+printer_close(struct inode *inode, struct file *fd)
+{
+	struct printer_dev	*dev = fd->private_data;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->printer_cdev_open = 0;
+	fd->private_data = NULL;
+	/* Change printer status to show that the printer is off-line. */
+	dev->printer_status &= ~PRINTER_SELECTED;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	DBG(dev, "printer_close\n");
+
+	return 0;
+}
+
+/* This function must be called with interrupts turned off. */
+static void
+setup_rx_reqs(struct printer_dev *dev)
+{
+	struct usb_request              *req;
+
+	while (likely(!list_empty(&dev->rx_reqs))) {
+		int error;
+
+		req = container_of(dev->rx_reqs.next,
+				struct usb_request, list);
+		list_del_init(&req->list);
+
+		/* The USB Host sends us whatever amount of data it wants to
+		 * so we always set the length field to the full USB_BUFSIZE.
+		 * If the amount of data is more than the read() caller asked
+		 * for it will be stored in the request buffer until it is
+		 * asked for by read().
+		 */
+		req->length = USB_BUFSIZE;
+		req->complete = rx_complete;
+
+		error = usb_ep_queue(dev->out_ep, req, GFP_ATOMIC);
+		if (error) {
+			DBG(dev, "rx submit --> %d\n", error);
+			list_add(&req->list, &dev->rx_reqs);
+			break;
+		} else {
+			list_add(&req->list, &dev->rx_reqs_active);
+		}
+	}
+}
+
+static ssize_t
+printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+{
+	struct printer_dev		*dev = fd->private_data;
+	unsigned long			flags;
+	size_t				size;
+	size_t				bytes_copied;
+	struct usb_request		*req;
+	/* This is a pointer to the current USB rx request. */
+	struct usb_request		*current_rx_req;
+	/* This is the number of bytes in the current rx buffer. */
+	size_t				current_rx_bytes;
+	/* This is a pointer to the current rx buffer. */
+	u8				*current_rx_buf;
+
+	if (len == 0)
+		return -EINVAL;
+
+	DBG(dev, "printer_read trying to read %d bytes\n", (int)len);
+
+	mutex_lock(&dev->lock_printer_io);
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* We will use this flag later to check if a printer reset happened
+	 * after we turn interrupts back on.
+	 */
+	dev->reset_printer = 0;
+
+	setup_rx_reqs(dev);
+
+	bytes_copied = 0;
+	current_rx_req = dev->current_rx_req;
+	current_rx_bytes = dev->current_rx_bytes;
+	current_rx_buf = dev->current_rx_buf;
+	dev->current_rx_req = NULL;
+	dev->current_rx_bytes = 0;
+	dev->current_rx_buf = NULL;
+
+	/* Check if there is any data in the read buffers. Please note that
+	 * current_rx_bytes is the number of bytes in the current rx buffer.
+	 * If it is zero then check if there are any other rx_buffers that
+	 * are on the completed list. We are only out of data if all rx
+	 * buffers are empty.
+	 */
+	if ((current_rx_bytes == 0) &&
+			(likely(list_empty(&dev->rx_buffers)))) {
+		/* Turn interrupts back on before sleeping. */
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		/*
+		 * If no data is available check if this is a NON-Blocking
+		 * call or not.
+		 */
+		if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) {
+			mutex_unlock(&dev->lock_printer_io);
+			return -EAGAIN;
+		}
+
+		/* Sleep until data is available */
+		wait_event_interruptible(dev->rx_wait,
+				(likely(!list_empty(&dev->rx_buffers))));
+		spin_lock_irqsave(&dev->lock, flags);
+	}
+
+	/* If we have data to return, copy it to the caller's buffer. */
+	while ((current_rx_bytes || likely(!list_empty(&dev->rx_buffers)))
+			&& len) {
+		if (current_rx_bytes == 0) {
+			req = container_of(dev->rx_buffers.next,
+					struct usb_request, list);
+			list_del_init(&req->list);
+
+			if (req->actual && req->buf) {
+				current_rx_req = req;
+				current_rx_bytes = req->actual;
+				current_rx_buf = req->buf;
+			} else {
+				list_add(&req->list, &dev->rx_reqs);
+				continue;
+			}
+		}
+
+		/* Don't leave irqs off while doing memory copies */
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		if (len > current_rx_bytes)
+			size = current_rx_bytes;
+		else
+			size = len;
+
+		size -= copy_to_user(buf, current_rx_buf, size);
+		bytes_copied += size;
+		len -= size;
+		buf += size;
+
+		spin_lock_irqsave(&dev->lock, flags);
+
+		/* We've disconnected or reset so return. */
+		if (dev->reset_printer) {
+			list_add(&current_rx_req->list, &dev->rx_reqs);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			mutex_unlock(&dev->lock_printer_io);
+			return -EAGAIN;
+		}
+
+		/* If we are not returning all the data left in this RX
+		 * request buffer then adjust the amount of data left in
+		 * the buffer.  Otherwise, if we are done with this RX
+		 * request buffer, requeue it to get any incoming data
+		 * from the USB host.
+		 */
+		if (size < current_rx_bytes) {
+			current_rx_bytes -= size;
+			current_rx_buf += size;
+		} else {
+			list_add(&current_rx_req->list, &dev->rx_reqs);
+			current_rx_bytes = 0;
+			current_rx_buf = NULL;
+			current_rx_req = NULL;
+		}
+	}
+
+	dev->current_rx_req = current_rx_req;
+	dev->current_rx_bytes = current_rx_bytes;
+	dev->current_rx_buf = current_rx_buf;
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock_printer_io);
+
+	DBG(dev, "printer_read returned %d bytes\n", (int)bytes_copied);
+
+	if (bytes_copied)
+		return bytes_copied;
+	else
+		return -EAGAIN;
+}
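+
+/*
+ * Hedged usage sketch (not part of this driver): a minimal user space
+ * reader for the character device implemented above.  The node name
+ * /dev/g_printer matches the gadget printer documentation but is an
+ * assumption here; adjust it to whatever udev created on the target.
+ *
+ *	#include <stdio.h>
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *
+ *	int main(void)
+ *	{
+ *		char buf[512];
+ *		ssize_t n;
+ *		int fd = open("/dev/g_printer", O_RDWR);  // blocking mode
+ *
+ *		if (fd < 0)
+ *			return 1;
+ *		while ((n = read(fd, buf, sizeof(buf))) > 0)  // sleeps until
+ *			fwrite(buf, 1, n, stdout);            // host data arrives
+ *		close(fd);
+ *		return 0;
+ *	}
+ */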
+
+static ssize_t
+printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+{
+	struct printer_dev	*dev = fd->private_data;
+	unsigned long		flags;
+	size_t			size;	/* Amount of data in a TX request. */
+	size_t			bytes_copied = 0;
+	struct usb_request	*req;
+
+	DBG(dev, "printer_write trying to send %d bytes\n", (int)len);
+
+	if (len == 0)
+		return -EINVAL;
+
+	mutex_lock(&dev->lock_printer_io);
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* Check if a printer reset happens while we have interrupts on */
+	dev->reset_printer = 0;
+
+	/* Check if there are any available write buffers */
+	if (likely(list_empty(&dev->tx_reqs))) {
+		/* Turn interrupts back on before sleeping. */
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		/*
+		 * If no write buffers are available check if this is
+		 * a NON-Blocking call or not.
+		 */
+		if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) {
+			mutex_unlock(&dev->lock_printer_io);
+			return -EAGAIN;
+		}
+
+		/* Sleep until a write buffer is available */
+		wait_event_interruptible(dev->tx_wait,
+				(likely(!list_empty(&dev->tx_reqs))));
+		spin_lock_irqsave(&dev->lock, flags);
+	}
+
+	while (likely(!list_empty(&dev->tx_reqs)) && len) {
+
+		if (len > USB_BUFSIZE)
+			size = USB_BUFSIZE;
+		else
+			size = len;
+
+		req = container_of(dev->tx_reqs.next, struct usb_request,
+				list);
+		list_del_init(&req->list);
+
+		req->complete = tx_complete;
+		req->length = size;
+
+		/* Check if we need to send a zero length packet. */
+		if (len > size)
+			/* There will be more TX requests so not yet. */
+			req->zero = 0;
+		else
+			/* If the remaining data is an exact multiple of
+			 * the maxpacket size then request a zero length
+			 * packet to terminate the transfer.
+			 */
+			req->zero = ((len % dev->in_ep->maxpacket) == 0);
+
+		/* Don't leave irqs off while doing memory copies */
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		if (copy_from_user(req->buf, buf, size)) {
+			spin_lock_irqsave(&dev->lock, flags);
+			list_add(&req->list, &dev->tx_reqs);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			mutex_unlock(&dev->lock_printer_io);
+			return bytes_copied;
+		}
+
+		bytes_copied += size;
+		len -= size;
+		buf += size;
+
+		spin_lock_irqsave(&dev->lock, flags);
+
+		/* We've disconnected or reset, so put the req back and return */
+		if (dev->reset_printer) {
+			list_add(&req->list, &dev->tx_reqs);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			mutex_unlock(&dev->lock_printer_io);
+			return -EAGAIN;
+		}
+
+		if (usb_ep_queue(dev->in_ep, req, GFP_ATOMIC)) {
+			list_add(&req->list, &dev->tx_reqs);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			mutex_unlock(&dev->lock_printer_io);
+			return -EAGAIN;
+		}
+
+		list_add(&req->list, &dev->tx_reqs_active);
+
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock_printer_io);
+
+	DBG(dev, "printer_write sent %d bytes\n", (int)bytes_copied);
+
+	if (bytes_copied) {
+		return bytes_copied;
+	} else {
+		return -EAGAIN;
+	}
+}
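+
+/*
+ * Hedged usage sketch (not part of this driver): sending device-to-host
+ * data with O_NONBLOCK set, retrying on the -EAGAIN that printer_write()
+ * returns while no transmit request is free.  The helper name and the
+ * 1 ms back-off are illustrative assumptions only.
+ *
+ *	#include <errno.h>
+ *	#include <unistd.h>
+ *
+ *	static int send_all(int fd, const char *p, size_t len)
+ *	{
+ *		while (len) {
+ *			ssize_t n = write(fd, p, len);
+ *
+ *			if (n < 0) {
+ *				if (errno == EAGAIN) {
+ *					usleep(1000);	// tx buffers busy
+ *					continue;
+ *				}
+ *				return -1;	// real error
+ *			}
+ *			p += n;
+ *			len -= n;
+ *		}
+ *		return 0;
+ *	}
+ */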
+
+static int
+printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync)
+{
+	struct printer_dev	*dev = fd->private_data;
+	struct inode *inode = fd->f_path.dentry->d_inode;
+	unsigned long		flags;
+	int			tx_list_empty;
+
+	mutex_lock(&inode->i_mutex);
+	spin_lock_irqsave(&dev->lock, flags);
+	tx_list_empty = (likely(list_empty(&dev->tx_reqs)));
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (!tx_list_empty) {
+		/* Sleep until all data has been sent */
+		wait_event_interruptible(dev->tx_flush_wait,
+				(likely(list_empty(&dev->tx_reqs_active))));
+	}
+	mutex_unlock(&inode->i_mutex);
+
+	return 0;
+}
+
+static unsigned int
+printer_poll(struct file *fd, poll_table *wait)
+{
+	struct printer_dev	*dev = fd->private_data;
+	unsigned long		flags;
+	int			status = 0;
+
+	mutex_lock(&dev->lock_printer_io);
+	spin_lock_irqsave(&dev->lock, flags);
+	setup_rx_reqs(dev);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock_printer_io);
+
+	poll_wait(fd, &dev->rx_wait, wait);
+	poll_wait(fd, &dev->tx_wait, wait);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (likely(!list_empty(&dev->tx_reqs)))
+		status |= POLLOUT | POLLWRNORM;
+
+	if (likely(dev->current_rx_bytes) ||
+			likely(!list_empty(&dev->rx_buffers)))
+		status |= POLLIN | POLLRDNORM;
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return status;
+}
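+
+/*
+ * Hedged usage sketch (not part of this driver): waiting for either
+ * direction to become ready, using the POLLIN/POLLOUT bits reported by
+ * printer_poll() above.  "fd" is assumed to be the open gadget device.
+ *
+ *	#include <poll.h>
+ *
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
+ *
+ *	if (poll(&pfd, 1, -1) > 0) {
+ *		if (pfd.revents & POLLIN)
+ *			;	// host data queued, read() will not block
+ *		if (pfd.revents & POLLOUT)
+ *			;	// a tx request is free, write() will not block
+ *	}
+ */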
+
+static long
+printer_ioctl(struct file *fd, unsigned int code, unsigned long arg)
+{
+	struct printer_dev	*dev = fd->private_data;
+	unsigned long		flags;
+	int			status = 0;
+
+	DBG(dev, "printer_ioctl: cmd=0x%4.4x, arg=%lu\n", code, arg);
+
+	/* handle ioctls */
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	switch (code) {
+	case GADGET_GET_PRINTER_STATUS:
+		status = (int)dev->printer_status;
+		break;
+	case GADGET_SET_PRINTER_STATUS:
+		dev->printer_status = (u8)arg;
+		break;
+	default:
+		/* could not handle ioctl */
+		DBG(dev, "printer_ioctl: ERROR cmd=0x%4.4x is not supported\n",
+				code);
+		status = -ENOTTY;
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return status;
+}
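+
+/*
+ * Hedged usage sketch (not part of this driver): querying and updating the
+ * 1284-style status byte through the two ioctls handled above.  It assumes
+ * the GADGET_GET_PRINTER_STATUS/GADGET_SET_PRINTER_STATUS codes and the
+ * status bit definitions are exported to user space by a shared header.
+ *
+ *	#include <sys/ioctl.h>
+ *
+ *	int status = ioctl(fd, GADGET_GET_PRINTER_STATUS);
+ *
+ *	if (status >= 0) {
+ *		status |= PRINTER_SELECTED;	// example flag, from the header
+ *		ioctl(fd, GADGET_SET_PRINTER_STATUS, (unsigned char)status);
+ *	}
+ */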
+
+/* used after endpoint configuration */
+static const struct file_operations printer_io_operations = {
+	.owner =	THIS_MODULE,
+	.open =		printer_open,
+	.read =		printer_read,
+	.write =	printer_write,
+	.fsync =	printer_fsync,
+	.poll =		printer_poll,
+	.unlocked_ioctl = printer_ioctl,
+	.release =	printer_close,
+	.llseek =	noop_llseek,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int
+set_printer_interface(struct printer_dev *dev)
+{
+	int			result = 0;
+
+	dev->in_ep->desc = ep_desc(dev->gadget, &hs_ep_in_desc, &fs_ep_in_desc);
+	dev->in_ep->driver_data = dev;
+
+	dev->out_ep->desc = ep_desc(dev->gadget, &hs_ep_out_desc,
+				    &fs_ep_out_desc);
+	dev->out_ep->driver_data = dev;
+
+	result = usb_ep_enable(dev->in_ep);
+	if (result != 0) {
+		DBG(dev, "enable %s --> %d\n", dev->in_ep->name, result);
+		goto done;
+	}
+
+	result = usb_ep_enable(dev->out_ep);
+	if (result != 0) {
+		DBG(dev, "enable %s --> %d\n", dev->out_ep->name, result);
+		goto done;
+	}
+
+done:
+	/* on error, disable any endpoints  */
+	if (result != 0) {
+		(void) usb_ep_disable(dev->in_ep);
+		(void) usb_ep_disable(dev->out_ep);
+		dev->in_ep->desc = NULL;
+		dev->out_ep->desc = NULL;
+	}
+
+	/* caller is responsible for cleanup on error */
+	return result;
+}
+
+static void printer_reset_interface(struct printer_dev *dev)
+{
+	if (dev->interface < 0)
+		return;
+
+	DBG(dev, "%s\n", __func__);
+
+	if (dev->in_ep->desc)
+		usb_ep_disable(dev->in_ep);
+
+	if (dev->out_ep->desc)
+		usb_ep_disable(dev->out_ep);
+
+	dev->in_ep->desc = NULL;
+	dev->out_ep->desc = NULL;
+	dev->interface = -1;
+}
+
+/* change our operational config.  must agree with the code
+ * that returns config descriptors, and altsetting code.
+ */
+static int
+printer_set_config(struct printer_dev *dev, unsigned number)
+{
+	int			result = 0;
+	struct usb_gadget	*gadget = dev->gadget;
+
+	switch (number) {
+	case DEV_CONFIG_VALUE:
+		result = 0;
+		break;
+	default:
+		result = -EINVAL;
+		/* FALL THROUGH */
+	case 0:
+		break;
+	}
+
+	if (result) {
+		usb_gadget_vbus_draw(dev->gadget,
+				dev->gadget->is_otg ? 8 : 100);
+	} else {
+		unsigned power;
+
+		power = 2 * config_desc.bMaxPower;
+		usb_gadget_vbus_draw(dev->gadget, power);
+
+		dev->config = number;
+		INFO(dev, "%s config #%d: %d mA, %s\n",
+		     usb_speed_string(gadget->speed),
+		     number, power, driver_desc);
+	}
+	return result;
+}
+
+static int
+config_buf(enum usb_device_speed speed, u8 *buf, u8 type, unsigned index,
+		int is_otg)
+{
+	int					len;
+	const struct usb_descriptor_header	**function;
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+	int					hs = (speed == USB_SPEED_HIGH);
+
+	if (type == USB_DT_OTHER_SPEED_CONFIG)
+		hs = !hs;
+
+	if (hs) {
+		function = hs_printer_function;
+	} else {
+		function = fs_printer_function;
+	}
+#else
+	function = fs_printer_function;
+#endif
+
+	if (index >= device_desc.bNumConfigurations)
+		return -EINVAL;
+
+	/* for now, don't advertise srp-only devices */
+	if (!is_otg)
+		function++;
+
+	len = usb_gadget_config_buf(&config_desc, buf, USB_DESC_BUFSIZE,
+			function);
+	if (len < 0)
+		return len;
+	((struct usb_config_descriptor *) buf)->bDescriptorType = type;
+	return len;
+}
+
+/* Change our operational Interface. */
+static int
+set_interface(struct printer_dev *dev, unsigned number)
+{
+	int			result = 0;
+
+	/* Free the current interface */
+	switch (dev->interface) {
+	case PRINTER_INTERFACE:
+		printer_reset_interface(dev);
+		break;
+	}
+
+	switch (number) {
+	case PRINTER_INTERFACE:
+		result = set_printer_interface(dev);
+		if (result) {
+			printer_reset_interface(dev);
+		} else {
+			dev->interface = PRINTER_INTERFACE;
+		}
+		break;
+	default:
+		result = -EINVAL;
+	}
+
+	if (!result)
+		INFO(dev, "Using interface %x\n", number);
+
+	return result;
+}
+
+static void printer_setup_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	if (req->status || req->actual != req->length)
+		DBG((struct printer_dev *) ep->driver_data,
+				"setup complete --> %d, %d/%d\n",
+				req->status, req->actual, req->length);
+}
+
+static void printer_soft_reset(struct printer_dev *dev)
+{
+	struct usb_request	*req;
+
+	INFO(dev, "Received Printer Reset Request\n");
+
+	if (usb_ep_disable(dev->in_ep))
+		DBG(dev, "Failed to disable USB in_ep\n");
+	if (usb_ep_disable(dev->out_ep))
+		DBG(dev, "Failed to disable USB out_ep\n");
+
+	if (dev->current_rx_req != NULL) {
+		list_add(&dev->current_rx_req->list, &dev->rx_reqs);
+		dev->current_rx_req = NULL;
+	}
+	dev->current_rx_bytes = 0;
+	dev->current_rx_buf = NULL;
+	dev->reset_printer = 1;
+
+	while (likely(!(list_empty(&dev->rx_buffers)))) {
+		req = container_of(dev->rx_buffers.next, struct usb_request,
+				list);
+		list_del_init(&req->list);
+		list_add(&req->list, &dev->rx_reqs);
+	}
+
+	while (likely(!(list_empty(&dev->rx_reqs_active)))) {
+		req = container_of(dev->rx_reqs_active.next,
+				struct usb_request, list);
+		list_del_init(&req->list);
+		list_add(&req->list, &dev->rx_reqs);
+	}
+
+	while (likely(!(list_empty(&dev->tx_reqs_active)))) {
+		req = container_of(dev->tx_reqs_active.next,
+				struct usb_request, list);
+		list_del_init(&req->list);
+		list_add(&req->list, &dev->tx_reqs);
+	}
+
+	if (usb_ep_enable(dev->in_ep))
+		DBG(dev, "Failed to enable USB in_ep\n");
+	if (usb_ep_enable(dev->out_ep))
+		DBG(dev, "Failed to enable USB out_ep\n");
+
+	wake_up_interruptible(&dev->rx_wait);
+	wake_up_interruptible(&dev->tx_wait);
+	wake_up_interruptible(&dev->tx_flush_wait);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The setup() callback implements all the ep0 functionality that's not
+ * handled lower down.
+ */
+static int
+printer_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+{
+	struct printer_dev	*dev = get_gadget_data(gadget);
+	struct usb_request	*req = dev->req;
+	int			value = -EOPNOTSUPP;
+	u16			wIndex = le16_to_cpu(ctrl->wIndex);
+	u16			wValue = le16_to_cpu(ctrl->wValue);
+	u16			wLength = le16_to_cpu(ctrl->wLength);
+
+	DBG(dev, "ctrl req%02x.%02x v%04x i%04x l%d\n",
+		ctrl->bRequestType, ctrl->bRequest, wValue, wIndex, wLength);
+
+	req->complete = printer_setup_complete;
+
+	switch (ctrl->bRequestType&USB_TYPE_MASK) {
+
+	case USB_TYPE_STANDARD:
+		switch (ctrl->bRequest) {
+
+		case USB_REQ_GET_DESCRIPTOR:
+			if (ctrl->bRequestType != USB_DIR_IN)
+				break;
+			switch (wValue >> 8) {
+
+			case USB_DT_DEVICE:
+				device_desc.bMaxPacketSize0 =
+					gadget->ep0->maxpacket;
+				value = min(wLength, (u16) sizeof device_desc);
+				memcpy(req->buf, &device_desc, value);
+				break;
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+			case USB_DT_DEVICE_QUALIFIER:
+				if (!gadget_is_dualspeed(gadget))
+					break;
+				/*
+				 * assumes ep0 uses the same value for both
+				 * speeds
+				 */
+				dev_qualifier.bMaxPacketSize0 =
+					gadget->ep0->maxpacket;
+				value = min(wLength,
+						(u16) sizeof dev_qualifier);
+				memcpy(req->buf, &dev_qualifier, value);
+				break;
+
+			case USB_DT_OTHER_SPEED_CONFIG:
+				if (!gadget_is_dualspeed(gadget))
+					break;
+				/* FALLTHROUGH */
+#endif /* CONFIG_USB_GADGET_DUALSPEED */
+			case USB_DT_CONFIG:
+				value = config_buf(gadget->speed, req->buf,
+						wValue >> 8,
+						wValue & 0xff,
+						gadget->is_otg);
+				if (value >= 0)
+					value = min(wLength, (u16) value);
+				break;
+
+			case USB_DT_STRING:
+				value = usb_gadget_get_string(&stringtab,
+						wValue & 0xff, req->buf);
+				if (value >= 0)
+					value = min(wLength, (u16) value);
+				break;
+			}
+			break;
+
+		case USB_REQ_SET_CONFIGURATION:
+			if (ctrl->bRequestType != 0)
+				break;
+			if (gadget->a_hnp_support)
+				DBG(dev, "HNP available\n");
+			else if (gadget->a_alt_hnp_support)
+				DBG(dev, "HNP needs a different root port\n");
+			value = printer_set_config(dev, wValue);
+			if (!value)
+				value = set_interface(dev, PRINTER_INTERFACE);
+			break;
+		case USB_REQ_GET_CONFIGURATION:
+			if (ctrl->bRequestType != USB_DIR_IN)
+				break;
+			*(u8 *)req->buf = dev->config;
+			value = min(wLength, (u16) 1);
+			break;
+
+		case USB_REQ_SET_INTERFACE:
+			if (ctrl->bRequestType != USB_RECIP_INTERFACE ||
+					!dev->config)
+				break;
+
+			value = set_interface(dev, PRINTER_INTERFACE);
+			break;
+		case USB_REQ_GET_INTERFACE:
+			if (ctrl->bRequestType !=
+					(USB_DIR_IN|USB_RECIP_INTERFACE)
+					|| !dev->config)
+				break;
+
+			*(u8 *)req->buf = dev->interface;
+			value = min(wLength, (u16) 1);
+			break;
+
+		default:
+			goto unknown;
+		}
+		break;
+
+	case USB_TYPE_CLASS:
+		switch (ctrl->bRequest) {
+		case 0: /* Get the IEEE-1284 PNP String */
+			/* Only one printer interface is supported. */
+			if ((wIndex>>8) != PRINTER_INTERFACE)
+				break;
+
+			value = (pnp_string[0]<<8)|pnp_string[1];
+			memcpy(req->buf, pnp_string, value);
+			DBG(dev, "1284 PNP String: %x %s\n", value,
+					&pnp_string[2]);
+			break;
+
+		case 1: /* Get Port Status */
+			/* Only one printer interface is supported. */
+			if (wIndex != PRINTER_INTERFACE)
+				break;
+
+			*(u8 *)req->buf = dev->printer_status;
+			value = min(wLength, (u16) 1);
+			break;
+
+		case 2: /* Soft Reset */
+			/* Only one printer interface is supported. */
+			if (wIndex != PRINTER_INTERFACE)
+				break;
+
+			printer_soft_reset(dev);
+
+			value = 0;
+			break;
+
+		default:
+			goto unknown;
+		}
+		break;
+
+	default:
+unknown:
+		VDBG(dev,
+			"unknown ctrl req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			wValue, wIndex, wLength);
+		break;
+	}
+
+	/* respond with data transfer before status phase? */
+	if (value >= 0) {
+		req->length = value;
+		req->zero = value < wLength;
+		value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0) {
+			DBG(dev, "ep_queue --> %d\n", value);
+			req->status = 0;
+			printer_setup_complete(gadget->ep0, req);
+		}
+	}
+
+	/* host either stalls (value < 0) or reports success */
+	return value;
+}
+
+static void
+printer_disconnect(struct usb_gadget *gadget)
+{
+	struct printer_dev	*dev = get_gadget_data(gadget);
+	unsigned long		flags;
+
+	DBG(dev, "%s\n", __func__);
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	printer_reset_interface(dev);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void
+printer_unbind(struct usb_gadget *gadget)
+{
+	struct printer_dev	*dev = get_gadget_data(gadget);
+	struct usb_request	*req;
+
+
+	DBG(dev, "%s\n", __func__);
+
+	/* Remove sysfs files */
+	device_destroy(usb_gadget_class, g_printer_devno);
+
+	/* Remove Character Device */
+	cdev_del(&dev->printer_cdev);
+
+	/* we must already have been disconnected ... no i/o may be active */
+	WARN_ON(!list_empty(&dev->tx_reqs_active));
+	WARN_ON(!list_empty(&dev->rx_reqs_active));
+
+	/* Free all memory for this driver. */
+	while (!list_empty(&dev->tx_reqs)) {
+		req = container_of(dev->tx_reqs.next, struct usb_request,
+				list);
+		list_del(&req->list);
+		printer_req_free(dev->in_ep, req);
+	}
+
+	if (dev->current_rx_req != NULL)
+		printer_req_free(dev->out_ep, dev->current_rx_req);
+
+	while (!list_empty(&dev->rx_reqs)) {
+		req = container_of(dev->rx_reqs.next,
+				struct usb_request, list);
+		list_del(&req->list);
+		printer_req_free(dev->out_ep, req);
+	}
+
+	while (!list_empty(&dev->rx_buffers)) {
+		req = container_of(dev->rx_buffers.next,
+				struct usb_request, list);
+		list_del(&req->list);
+		printer_req_free(dev->out_ep, req);
+	}
+
+	if (dev->req) {
+		printer_req_free(gadget->ep0, dev->req);
+		dev->req = NULL;
+	}
+
+	set_gadget_data(gadget, NULL);
+}
+
+static int __init
+printer_bind(struct usb_gadget *gadget)
+{
+	struct printer_dev	*dev;
+	struct usb_ep		*in_ep, *out_ep;
+	int			status = -ENOMEM;
+	int			gcnum;
+	size_t			len;
+	u32			i;
+	struct usb_request	*req;
+
+	dev = &usb_printer_gadget;
+
+
+	/* Setup the sysfs files for the printer gadget. */
+	dev->pdev = device_create(usb_gadget_class, NULL, g_printer_devno,
+				  NULL, "g_printer");
+	if (IS_ERR(dev->pdev)) {
+		ERROR(dev, "Failed to create device: g_printer\n");
+		goto fail;
+	}
+
+	/*
+	 * Register a character device as an interface to a user mode
+	 * program that handles the printer specific functionality.
+	 */
+	cdev_init(&dev->printer_cdev, &printer_io_operations);
+	dev->printer_cdev.owner = THIS_MODULE;
+	status = cdev_add(&dev->printer_cdev, g_printer_devno, 1);
+	if (status) {
+		ERROR(dev, "Failed to open char device\n");
+		goto fail;
+	}
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0) {
+		device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
+	} else {
+		dev_warn(&gadget->dev, "controller '%s' not recognized\n",
+			gadget->name);
+		/* unrecognized, but safe unless bulk is REALLY quirky */
+		device_desc.bcdDevice =
+			cpu_to_le16(0xFFFF);
+	}
+	snprintf(manufacturer, sizeof(manufacturer), "%s %s with %s",
+		init_utsname()->sysname, init_utsname()->release,
+		gadget->name);
+
+	device_desc.idVendor =
+		cpu_to_le16(PRINTER_VENDOR_NUM);
+	device_desc.idProduct =
+		cpu_to_le16(PRINTER_PRODUCT_NUM);
+
+	/* support optional vendor/distro customization */
+	if (idVendor) {
+		if (!idProduct) {
+			dev_err(&gadget->dev, "idVendor needs idProduct!\n");
+			return -ENODEV;
+		}
+		device_desc.idVendor = cpu_to_le16(idVendor);
+		device_desc.idProduct = cpu_to_le16(idProduct);
+		if (bcdDevice)
+			device_desc.bcdDevice = cpu_to_le16(bcdDevice);
+	}
+
+	if (iManufacturer)
+		strlcpy(manufacturer, iManufacturer, sizeof manufacturer);
+
+	if (iProduct)
+		strlcpy(product_desc, iProduct, sizeof product_desc);
+
+	if (iSerialNum)
+		strlcpy(serial_num, iSerialNum, sizeof serial_num);
+
+	if (iPNPstring)
+		strlcpy(&pnp_string[2], iPNPstring, (sizeof pnp_string)-2);
+
+	len = strlen(pnp_string);
+	pnp_string[0] = (len >> 8) & 0xFF;
+	pnp_string[1] = len & 0xFF;
+
+	/* all we really need is bulk IN/OUT */
+	usb_ep_autoconfig_reset(gadget);
+	in_ep = usb_ep_autoconfig(gadget, &fs_ep_in_desc);
+	if (!in_ep) {
+autoconf_fail:
+		dev_err(&gadget->dev, "can't autoconfigure on %s\n",
+			gadget->name);
+		return -ENODEV;
+	}
+	in_ep->driver_data = in_ep;	/* claim */
+
+	out_ep = usb_ep_autoconfig(gadget, &fs_ep_out_desc);
+	if (!out_ep)
+		goto autoconf_fail;
+	out_ep->driver_data = out_ep;	/* claim */
+
+#ifdef	CONFIG_USB_GADGET_DUALSPEED
+	/* assumes that all endpoints are dual-speed */
+	hs_ep_in_desc.bEndpointAddress = fs_ep_in_desc.bEndpointAddress;
+	hs_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
+#endif	/* DUALSPEED */
+
+	usb_gadget_set_selfpowered(gadget);
+
+	if (gadget->is_otg) {
+		otg_desc.bmAttributes |= USB_OTG_HNP;
+		config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	spin_lock_init(&dev->lock);
+	mutex_init(&dev->lock_printer_io);
+	INIT_LIST_HEAD(&dev->tx_reqs);
+	INIT_LIST_HEAD(&dev->tx_reqs_active);
+	INIT_LIST_HEAD(&dev->rx_reqs);
+	INIT_LIST_HEAD(&dev->rx_reqs_active);
+	INIT_LIST_HEAD(&dev->rx_buffers);
+	init_waitqueue_head(&dev->rx_wait);
+	init_waitqueue_head(&dev->tx_wait);
+	init_waitqueue_head(&dev->tx_flush_wait);
+
+	dev->config = 0;
+	dev->interface = -1;
+	dev->printer_cdev_open = 0;
+	dev->printer_status = PRINTER_NOT_ERROR;
+	dev->current_rx_req = NULL;
+	dev->current_rx_bytes = 0;
+	dev->current_rx_buf = NULL;
+
+	dev->in_ep = in_ep;
+	dev->out_ep = out_ep;
+
+	/* preallocate control message data and buffer */
+	dev->req = printer_req_alloc(gadget->ep0, USB_DESC_BUFSIZE,
+			GFP_KERNEL);
+	if (!dev->req) {
+		status = -ENOMEM;
+		goto fail;
+	}
+
+	for (i = 0; i < QLEN; i++) {
+		req = printer_req_alloc(dev->in_ep, USB_BUFSIZE, GFP_KERNEL);
+		if (!req) {
+			while (!list_empty(&dev->tx_reqs)) {
+				req = container_of(dev->tx_reqs.next,
+						struct usb_request, list);
+				list_del(&req->list);
+				printer_req_free(dev->in_ep, req);
+			}
+			return -ENOMEM;
+		}
+		list_add(&req->list, &dev->tx_reqs);
+	}
+
+	for (i = 0; i < QLEN; i++) {
+		req = printer_req_alloc(dev->out_ep, USB_BUFSIZE, GFP_KERNEL);
+		if (!req) {
+			while (!list_empty(&dev->rx_reqs)) {
+				req = container_of(dev->rx_reqs.next,
+						struct usb_request, list);
+				list_del(&req->list);
+				printer_req_free(dev->out_ep, req);
+			}
+			return -ENOMEM;
+		}
+		list_add(&req->list, &dev->rx_reqs);
+	}
+
+	dev->req->complete = printer_setup_complete;
+
+	/* finish hookup to lower layer ... */
+	dev->gadget = gadget;
+	set_gadget_data(gadget, dev);
+	gadget->ep0->driver_data = dev;
+
+	INFO(dev, "%s, version: " DRIVER_VERSION "\n", driver_desc);
+	INFO(dev, "using %s, OUT %s IN %s\n", gadget->name, out_ep->name,
+			in_ep->name);
+
+	return 0;
+
+fail:
+	printer_unbind(gadget);
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_gadget_driver printer_driver = {
+	.max_speed	= DEVSPEED,
+
+	.function	= (char *) driver_desc,
+	.unbind		= printer_unbind,
+
+	.setup		= printer_setup,
+	.disconnect	= printer_disconnect,
+
+	.driver		= {
+		.name		= (char *) shortname,
+		.owner		= THIS_MODULE,
+	},
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Craig Nadler");
+MODULE_LICENSE("GPL");
+
+static int __init
+init(void)
+{
+	int status;
+
+	usb_gadget_class = class_create(THIS_MODULE, "usb_printer_gadget");
+	if (IS_ERR(usb_gadget_class)) {
+		status = PTR_ERR(usb_gadget_class);
+		ERROR(dev, "unable to create usb_gadget class %d\n", status);
+		return status;
+	}
+
+	status = alloc_chrdev_region(&g_printer_devno, 0, 1,
+			"USB printer gadget");
+	if (status) {
+		ERROR(dev, "alloc_chrdev_region %d\n", status);
+		class_destroy(usb_gadget_class);
+		return status;
+	}
+
+	status = usb_gadget_probe_driver(&printer_driver, printer_bind);
+	if (status) {
+		class_destroy(usb_gadget_class);
+		unregister_chrdev_region(g_printer_devno, 1);
+		DBG(dev, "usb_gadget_probe_driver %x\n", status);
+	}
+
+	return status;
+}
+module_init(init);
+
+static void __exit
+cleanup(void)
+{
+	int status;
+
+	mutex_lock(&usb_printer_gadget.lock_printer_io);
+	status = usb_gadget_unregister_driver(&printer_driver);
+	if (status)
+		ERROR(dev, "usb_gadget_unregister_driver %x\n", status);
+
+	unregister_chrdev_region(g_printer_devno, 1);
+	class_destroy(usb_gadget_class);
+	mutex_unlock(&usb_printer_gadget.lock_printer_io);
+}
+module_exit(cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pxa25x_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pxa25x_udc.c
new file mode 100644
index 0000000..41ed69c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pxa25x_udc.c
@@ -0,0 +1,2373 @@
+/*
+ * Intel PXA25x and IXP4xx on-chip full speed USB device controllers
+ *
+ * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
+ * Copyright (C) 2003 Robert Schwebel, Pengutronix
+ * Copyright (C) 2003 Benedikt Spranger, Pengutronix
+ * Copyright (C) 2003 David Brownell
+ * Copyright (C) 2003 Joshua Wise
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/prefetch.h>
+
+#include <asm/byteorder.h>
+#include <asm/dma.h>
+#include <asm/gpio.h>
+#include <asm/mach-types.h>
+#include <asm/unaligned.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+
+/*
+ * This driver is PXA25x only.  Grab the right register definitions.
+ */
+#ifdef CONFIG_ARCH_PXA
+#include <mach/pxa25x-udc.h>
+#endif
+
+#ifdef CONFIG_ARCH_LUBBOCK
+#include <mach/lubbock.h>
+#endif
+
+#include <asm/mach/udc_pxa2xx.h>
+
+
+/*
+ * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x
+ * series processors.  The UDC for the IXP 4xx series is very similar.
+ * There are fifteen endpoints, in addition to ep0.
+ *
+ * Such controller drivers work with a gadget driver.  The gadget driver
+ * returns descriptors, implements configuration and data protocols used
+ * by the host to interact with this device, and allocates endpoints to
+ * the different protocol interfaces.  The controller driver virtualizes
+ * usb hardware so that the gadget drivers will be more portable.
+ *
+ * This UDC hardware wants to implement a bit too much USB protocol, so
+ * it constrains the sorts of USB configuration change events that work.
+ * The errata for these chips are misleading; some "fixed" bugs from
+ * pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
+ *
+ * Note that the UDC hardware supports DMA (except on IXP) but that's
+ * not used here.  IN-DMA (to host) is simple enough, when the data is
+ * suitably aligned (16 bytes) ... the network stack doesn't do that,
+ * other software can.  OUT-DMA is buggy in most chip versions, as well
+ * as poorly designed (data toggle not automatic).  So this driver won't
+ * bother using DMA.  (Mostly-working IN-DMA support was available in
+ * kernels before 2.6.23, but was never enabled or well tested.)
+ */
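+
+/*
+ * For orientation, a hedged sketch of the pairing described above (all
+ * names below are placeholders, not part of this file): a gadget driver
+ * registers a struct usb_gadget_driver, and this controller driver then
+ * invokes its callbacks on behalf of the host.
+ *
+ *	static struct usb_gadget_driver example_driver = {
+ *		.max_speed	= USB_SPEED_FULL,     // this UDC is full speed only
+ *		.function	= "example",
+ *		.setup		= example_setup,      // ep0 control requests
+ *		.disconnect	= example_disconnect, // host went away
+ *		.driver		= { .name = "example" },
+ *	};
+ *
+ *	usb_gadget_probe_driver(&example_driver, example_bind);
+ */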
+
+#define	DRIVER_VERSION	"30-June-2007"
+#define	DRIVER_DESC	"PXA 25x USB Device Controller driver"
+
+
+static const char driver_name [] = "pxa25x_udc";
+
+static const char ep0name [] = "ep0";
+
+
+#ifdef CONFIG_ARCH_IXP4XX
+
+/* cpu-specific register addresses are compiled in to this code */
+#ifdef CONFIG_ARCH_PXA
+#error "Can't configure both IXP and PXA"
+#endif
+
+/* IXP doesn't yet support <linux/clk.h> */
+#define clk_get(dev,name)	NULL
+#define clk_enable(clk)		do { } while (0)
+#define clk_disable(clk)	do { } while (0)
+#define clk_put(clk)		do { } while (0)
+
+#endif
+
+#include "pxa25x_udc.h"
+
+
+#ifdef	CONFIG_USB_PXA25X_SMALL
+#define SIZE_STR	" (small)"
+#else
+#define SIZE_STR	""
+#endif
+
+/* ---------------------------------------------------------------------------
+ *	endpoint related parts of the api to the usb controller hardware,
+ *	used by gadget driver; and the inner talker-to-hardware core.
+ * ---------------------------------------------------------------------------
+ */
+
+static void pxa25x_ep_fifo_flush (struct usb_ep *ep);
+static void nuke (struct pxa25x_ep *, int status);
+
+/* one GPIO should control a D+ pullup, so host sees this device (or not) */
+static void pullup_off(void)
+{
+	struct pxa2xx_udc_mach_info		*mach = the_controller->mach;
+	int off_level = mach->gpio_pullup_inverted;
+
+	if (gpio_is_valid(mach->gpio_pullup))
+		gpio_set_value(mach->gpio_pullup, off_level);
+	else if (mach->udc_command)
+		mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
+}
+
+static void pullup_on(void)
+{
+	struct pxa2xx_udc_mach_info		*mach = the_controller->mach;
+	int on_level = !mach->gpio_pullup_inverted;
+
+	if (gpio_is_valid(mach->gpio_pullup))
+		gpio_set_value(mach->gpio_pullup, on_level);
+	else if (mach->udc_command)
+		mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
+}
+
+static void pio_irq_enable(int bEndpointAddress)
+{
+	bEndpointAddress &= 0xf;
+	if (bEndpointAddress < 8)
+		UICR0 &= ~(1 << bEndpointAddress);
+	else {
+		bEndpointAddress -= 8;
+		UICR1 &= ~(1 << bEndpointAddress);
+	}
+}
+
+static void pio_irq_disable(int bEndpointAddress)
+{
+	bEndpointAddress &= 0xf;
+	if (bEndpointAddress < 8)
+		UICR0 |= 1 << bEndpointAddress;
+	else {
+		bEndpointAddress -= 8;
+		UICR1 |= 1 << bEndpointAddress;
+	}
+}
+
+/* The UDCCR reg contains mask and interrupt status bits,
+ * so using '|=' isn't safe as it may ack an interrupt.
+ */
+#define UDCCR_MASK_BITS         (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
+
+static inline void udc_set_mask_UDCCR(int mask)
+{
+	UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS);
+}
+
+static inline void udc_clear_mask_UDCCR(int mask)
+{
+	UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS);
+}
+
+static inline void udc_ack_int_UDCCR(int mask)
+{
+	/* udccr contains the bits we don't want to change */
+	__u32 udccr = UDCCR & UDCCR_MASK_BITS;
+
+	UDCCR = udccr | (mask & ~UDCCR_MASK_BITS);
+}
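+
+/*
+ * Hedged example (illustrative only): acknowledging a pending resume
+ * interrupt and then unmasking suspend/resume reporting via the helpers
+ * above.  A bare "UDCCR |= ..." read-modify-write could write the live
+ * status bits back and acknowledge an interrupt by accident.
+ *
+ *	udc_ack_int_UDCCR(UDCCR_RESIR);		// ack the resume interrupt
+ *	udc_clear_mask_UDCCR(UDCCR_SRM);	// unmask suspend/resume irqs
+ */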
+
+/*
+ * endpoint enable/disable
+ *
+ * we need to verify the descriptors used to enable endpoints.  since pxa25x
+ * endpoint configurations are fixed, and are pretty much always enabled,
+ * there's not a lot to manage here.
+ *
+ * because pxa25x can't selectively initialize bulk (or interrupt) endpoints,
+ * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
+ * for a single interface (with only the default altsetting) and for gadget
+ * drivers that don't halt endpoints (not reset by set_interface).  that also
+ * means that if you use ISO, you must violate the USB spec rule that all
+ * iso endpoints must be in non-default altsettings.
+ */
+static int pxa25x_ep_enable (struct usb_ep *_ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct pxa25x_ep        *ep;
+	struct pxa25x_udc       *dev;
+
+	ep = container_of (_ep, struct pxa25x_ep, ep);
+	if (!_ep || !desc || ep->desc || _ep->name == ep0name
+			|| desc->bDescriptorType != USB_DT_ENDPOINT
+			|| ep->bEndpointAddress != desc->bEndpointAddress
+			|| ep->fifo_size < usb_endpoint_maxp (desc)) {
+		DMSG("%s, bad ep or descriptor\n", __func__);
+		return -EINVAL;
+	}
+
+	/* xfer types must match, except that interrupt ~= bulk */
+	if (ep->bmAttributes != desc->bmAttributes
+			&& ep->bmAttributes != USB_ENDPOINT_XFER_BULK
+			&& desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
+		DMSG("%s, %s type mismatch\n", __func__, _ep->name);
+		return -EINVAL;
+	}
+
+	/* hardware _could_ do smaller, but driver doesn't */
+	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
+				&& usb_endpoint_maxp (desc)
+						!= BULK_FIFO_SIZE)
+			|| !desc->wMaxPacketSize) {
+		DMSG("%s, bad %s maxpacket\n", __func__, _ep->name);
+		return -ERANGE;
+	}
+
+	dev = ep->dev;
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
+		DMSG("%s, bogus device state\n", __func__);
+		return -ESHUTDOWN;
+	}
+
+	ep->desc = desc;
+	ep->stopped = 0;
+	ep->pio_irqs = 0;
+	ep->ep.maxpacket = usb_endpoint_maxp (desc);
+
+	/* flush fifo (mostly for OUT buffers) */
+	pxa25x_ep_fifo_flush (_ep);
+
+	/* ... reset halt state too, if we could ... */
+
+	DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
+	return 0;
+}
+
+static int pxa25x_ep_disable (struct usb_ep *_ep)
+{
+	struct pxa25x_ep	*ep;
+	unsigned long		flags;
+
+	ep = container_of (_ep, struct pxa25x_ep, ep);
+	if (!_ep || !ep->desc) {
+		DMSG("%s, %s not enabled\n", __func__,
+			_ep ? ep->ep.name : NULL);
+		return -EINVAL;
+	}
+	local_irq_save(flags);
+
+	nuke (ep, -ESHUTDOWN);
+
+	/* flush fifo (mostly for IN buffers) */
+	pxa25x_ep_fifo_flush (_ep);
+
+	ep->desc = NULL;
+	ep->ep.desc = NULL;
+	ep->stopped = 1;
+
+	local_irq_restore(flags);
+	DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* for the pxa25x, these can just wrap kmalloc/kfree.  gadget drivers
+ * must still pass correctly initialized endpoints, since other controller
+ * drivers may care about how it's currently set up (dma issues etc).
+ */
+
+/*
+ *	pxa25x_ep_alloc_request - allocate a request data structure
+ */
+static struct usb_request *
+pxa25x_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	struct pxa25x_request *req;
+
+	req = kzalloc(sizeof(*req), gfp_flags);
+	if (!req)
+		return NULL;
+
+	INIT_LIST_HEAD (&req->queue);
+	return &req->req;
+}
+
+
+/*
+ *	pxa25x_ep_free_request - deallocate a request data structure
+ */
+static void
+pxa25x_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct pxa25x_request	*req;
+
+	req = container_of (_req, struct pxa25x_request, req);
+	WARN_ON(!list_empty (&req->queue));
+	kfree(req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ *	done - retire a request; caller blocked irqs
+ */
+static void done(struct pxa25x_ep *ep, struct pxa25x_request *req, int status)
+{
+	unsigned		stopped = ep->stopped;
+
+	list_del_init(&req->queue);
+
+	if (likely (req->req.status == -EINPROGRESS))
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	if (status && status != -ESHUTDOWN)
+		DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+	req->req.complete(&ep->ep, &req->req);
+	ep->stopped = stopped;
+}
+
+
+static inline void ep0_idle (struct pxa25x_udc *dev)
+{
+	dev->ep0state = EP0_IDLE;
+}
+
+static int
+write_packet(volatile u32 *uddr, struct pxa25x_request *req, unsigned max)
+{
+	u8		*buf;
+	unsigned	length, count;
+
+	buf = req->req.buf + req->req.actual;
+	prefetch(buf);
+
+	/* how big will this packet be? */
+	length = min(req->req.length - req->req.actual, max);
+	req->req.actual += length;
+
+	count = length;
+	while (likely(count--))
+		*uddr = *buf++;
+
+	return length;
+}
+
+/*
+ * write to an IN endpoint fifo, as many packets as possible.
+ * irqs will use this to write the rest later.
+ * caller guarantees at least one packet buffer is ready (or a zlp).
+ */
+static int
+write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
+{
+	unsigned		max;
+
+	max = usb_endpoint_maxp(ep->desc);
+	do {
+		unsigned	count;
+		int		is_last, is_short;
+
+		count = write_packet(ep->reg_uddr, req, max);
+
+		/* last packet is usually short (or a zlp) */
+		if (unlikely (count != max))
+			is_last = is_short = 1;
+		else {
+			if (likely(req->req.length != req->req.actual)
+					|| req->req.zero)
+				is_last = 0;
+			else
+				is_last = 1;
+			/* interrupt/iso maxpacket may not fill the fifo */
+			is_short = unlikely (max < ep->fifo_size);
+		}
+
+		DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
+			ep->ep.name, count,
+			is_last ? "/L" : "", is_short ? "/S" : "",
+			req->req.length - req->req.actual, req);
+
+		/* let loose that packet. maybe try writing another one,
+		 * double buffering might work.  TSP, TPC, and TFS
+		 * bit values are the same for all normal IN endpoints.
+		 */
+		*ep->reg_udccs = UDCCS_BI_TPC;
+		if (is_short)
+			*ep->reg_udccs = UDCCS_BI_TSP;
+
+		/* requests complete when all IN data is in the FIFO */
+		if (is_last) {
+			done (ep, req, 0);
+			if (list_empty(&ep->queue))
+				pio_irq_disable (ep->bEndpointAddress);
+			return 1;
+		}
+
+		// TODO experiment: how robust can fifo mode tweaking be?
+		// double buffering is off in the default fifo mode, which
+		// prevents TFS from being set here.
+
+	} while (*ep->reg_udccs & UDCCS_BI_TFS);
+	return 0;
+}
+
+/* caller asserts req->pending (ep0 irq status nyet cleared); starts
+ * ep0 data stage.  these chips want very simple state transitions.
+ */
+static inline
+void ep0start(struct pxa25x_udc *dev, u32 flags, const char *tag)
+{
+	UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
+	USIR0 = USIR0_IR0;
+	dev->req_pending = 0;
+	DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
+		__func__, tag, UDCCS0, flags);
+}
+
+static int
+write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
+{
+	unsigned	count;
+	int		is_short;
+
+	count = write_packet(&UDDR0, req, EP0_FIFO_SIZE);
+	ep->dev->stats.write.bytes += count;
+
+	/* last packet "must be" short (or a zlp) */
+	is_short = (count != EP0_FIFO_SIZE);
+
+	DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
+		req->req.length - req->req.actual, req);
+
+	if (unlikely (is_short)) {
+		if (ep->dev->req_pending)
+			ep0start(ep->dev, UDCCS0_IPR, "short IN");
+		else
+			UDCCS0 = UDCCS0_IPR;
+
+		count = req->req.length;
+		done (ep, req, 0);
+		ep0_idle(ep->dev);
+#ifndef CONFIG_ARCH_IXP4XX
+#if 1
+		/* This seems to get rid of lost status irqs in some cases:
+		 * host responds quickly, or next request involves config
+		 * change automagic, or should have been hidden, or ...
+		 *
+		 * FIXME get rid of all udelays possible...
+		 */
+		if (count >= EP0_FIFO_SIZE) {
+			count = 100;
+			do {
+				if ((UDCCS0 & UDCCS0_OPR) != 0) {
+					/* clear OPR, generate ack */
+					UDCCS0 = UDCCS0_OPR;
+					break;
+				}
+				count--;
+				udelay(1);
+			} while (count);
+		}
+#endif
+#endif
+	} else if (ep->dev->req_pending)
+		ep0start(ep->dev, 0, "IN");
+	return is_short;
+}
+
+
+/*
+ * read_fifo -  unload packet(s) from the fifo we use for usb OUT
+ * transfers and put them into the request.  caller should have made
+ * sure there's at least one packet ready.
+ *
+ * returns true if the request completed because of short packet or the
+ * request buffer having filled (and maybe overran till end-of-packet).
+ */
+static int
+read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
+{
+	for (;;) {
+		u32		udccs;
+		u8		*buf;
+		unsigned	bufferspace, count, is_short;
+
+		/* make sure there's a packet in the FIFO.
+		 * UDCCS_{BO,IO}_RPC are all the same bit value.
+		 * UDCCS_{BO,IO}_RNE are all the same bit value.
+		 */
+		udccs = *ep->reg_udccs;
+		if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
+			break;
+		buf = req->req.buf + req->req.actual;
+		prefetchw(buf);
+		bufferspace = req->req.length - req->req.actual;
+
+		/* read all bytes from this packet */
+		if (likely (udccs & UDCCS_BO_RNE)) {
+			count = 1 + (0x0ff & *ep->reg_ubcr);
+			req->req.actual += min (count, bufferspace);
+		} else /* zlp */
+			count = 0;
+		is_short = (count < ep->ep.maxpacket);
+		DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
+			ep->ep.name, udccs, count,
+			is_short ? "/S" : "",
+			req, req->req.actual, req->req.length);
+		while (likely (count-- != 0)) {
+			u8	byte = (u8) *ep->reg_uddr;
+
+			if (unlikely (bufferspace == 0)) {
+				/* this happens when the driver's buffer
+				 * is smaller than what the host sent.
+				 * discard the extra data.
+				 */
+				if (req->req.status != -EOVERFLOW)
+					DMSG("%s overflow %d\n",
+						ep->ep.name, count);
+				req->req.status = -EOVERFLOW;
+			} else {
+				*buf++ = byte;
+				bufferspace--;
+			}
+		}
+		*ep->reg_udccs =  UDCCS_BO_RPC;
+		/* RPC/RSP/RNE could now reflect the other packet buffer */
+
+		/* iso is one request per packet */
+		if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+			if (udccs & UDCCS_IO_ROF)
+				req->req.status = -EHOSTUNREACH;
+			/* more like "is_done" */
+			is_short = 1;
+		}
+
+		/* completion */
+		if (is_short || req->req.actual == req->req.length) {
+			done (ep, req, 0);
+			if (list_empty(&ep->queue))
+				pio_irq_disable (ep->bEndpointAddress);
+			return 1;
+		}
+
+		/* finished that packet.  the next one may be waiting... */
+	}
+	return 0;
+}
+
+/*
+ * special ep0 version of the above.  no UBCR0 or double buffering; status
+ * handshaking is magic.  most device protocols don't need control-OUT.
+ * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
+ * protocols do use them.
+ */
+static int
+read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
+{
+	u8		*buf, byte;
+	unsigned	bufferspace;
+
+	buf = req->req.buf + req->req.actual;
+	bufferspace = req->req.length - req->req.actual;
+
+	while (UDCCS0 & UDCCS0_RNE) {
+		byte = (u8) UDDR0;
+
+		if (unlikely (bufferspace == 0)) {
+			/* this happens when the driver's buffer
+			 * is smaller than what the host sent.
+			 * discard the extra data.
+			 */
+			if (req->req.status != -EOVERFLOW)
+				DMSG("%s overflow\n", ep->ep.name);
+			req->req.status = -EOVERFLOW;
+		} else {
+			*buf++ = byte;
+			req->req.actual++;
+			bufferspace--;
+		}
+	}
+
+	UDCCS0 = UDCCS0_OPR | UDCCS0_IPR;
+
+	/* completion */
+	if (req->req.actual >= req->req.length)
+		return 1;
+
+	/* finished that packet.  the next one may be waiting... */
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+	struct pxa25x_request	*req;
+	struct pxa25x_ep	*ep;
+	struct pxa25x_udc	*dev;
+	unsigned long		flags;
+
+	req = container_of(_req, struct pxa25x_request, req);
+	if (unlikely (!_req || !_req->complete || !_req->buf
+			|| !list_empty(&req->queue))) {
+		DMSG("%s, bad params\n", __func__);
+		return -EINVAL;
+	}
+
+	ep = container_of(_ep, struct pxa25x_ep, ep);
+	if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
+		DMSG("%s, bad ep\n", __func__);
+		return -EINVAL;
+	}
+
+	dev = ep->dev;
+	if (unlikely (!dev->driver
+			|| dev->gadget.speed == USB_SPEED_UNKNOWN)) {
+		DMSG("%s, bogus device state\n", __func__);
+		return -ESHUTDOWN;
+	}
+
+	/* iso is always one packet per request, that's the only way
+	 * we can report per-packet status.  that also helps with dma.
+	 */
+	if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
+		        && req->req.length > usb_endpoint_maxp (ep->desc)))
+		return -EMSGSIZE;
+
+	DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
+		_ep->name, _req, _req->length, _req->buf);
+
+	local_irq_save(flags);
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	/* kickstart this i/o queue? */
+	if (list_empty(&ep->queue) && !ep->stopped) {
+		if (ep->desc == NULL/* ep0 */) {
+			unsigned	length = _req->length;
+
+			switch (dev->ep0state) {
+			case EP0_IN_DATA_PHASE:
+				dev->stats.write.ops++;
+				if (write_ep0_fifo(ep, req))
+					req = NULL;
+				break;
+
+			case EP0_OUT_DATA_PHASE:
+				dev->stats.read.ops++;
+				/* messy ... */
+				if (dev->req_config) {
+					DBG(DBG_VERBOSE, "ep0 config ack%s\n",
+						dev->has_cfr ?  "" : " raced");
+					if (dev->has_cfr)
+						UDCCFR = UDCCFR_AREN|UDCCFR_ACM
+							|UDCCFR_MB1;
+					done(ep, req, 0);
+					dev->ep0state = EP0_END_XFER;
+					local_irq_restore (flags);
+					return 0;
+				}
+				if (dev->req_pending)
+					ep0start(dev, UDCCS0_IPR, "OUT");
+				if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0
+						&& read_ep0_fifo(ep, req))) {
+					ep0_idle(dev);
+					done(ep, req, 0);
+					req = NULL;
+				}
+				break;
+
+			default:
+				DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
+				local_irq_restore (flags);
+				return -EL2HLT;
+			}
+		/* can the FIFO satisfy the request immediately? */
+		} else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
+			if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0
+					&& write_fifo(ep, req))
+				req = NULL;
+		} else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0
+				&& read_fifo(ep, req)) {
+			req = NULL;
+		}
+
+		if (likely (req && ep->desc))
+			pio_irq_enable(ep->bEndpointAddress);
+	}
+
+	/* pio or dma irq handler advances the queue. */
+	if (likely(req != NULL))
+		list_add_tail(&req->queue, &ep->queue);
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+
+/*
+ *	nuke - dequeue ALL requests
+ */
+static void nuke(struct pxa25x_ep *ep, int status)
+{
+	struct pxa25x_request *req;
+
+	/* called with irqs blocked */
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next,
+				struct pxa25x_request,
+				queue);
+		done(ep, req, status);
+	}
+	if (ep->desc)
+		pio_irq_disable (ep->bEndpointAddress);
+}
+
+
+/* dequeue JUST ONE request */
+static int pxa25x_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct pxa25x_ep	*ep;
+	struct pxa25x_request	*req;
+	unsigned long		flags;
+
+	ep = container_of(_ep, struct pxa25x_ep, ep);
+	if (!_ep || ep->ep.name == ep0name)
+		return -EINVAL;
+
+	local_irq_save(flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry (req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		local_irq_restore(flags);
+		return -EINVAL;
+	}
+
+	done(ep, req, -ECONNRESET);
+
+	local_irq_restore(flags);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value)
+{
+	struct pxa25x_ep	*ep;
+	unsigned long		flags;
+
+	ep = container_of(_ep, struct pxa25x_ep, ep);
+	if (unlikely (!_ep
+			|| (!ep->desc && ep->ep.name != ep0name))
+			|| ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		DMSG("%s, bad ep\n", __func__);
+		return -EINVAL;
+	}
+	if (value == 0) {
+		/* this path (reset toggle+halt) is needed to implement
+		 * SET_INTERFACE on normal hardware.  but it can't be
+		 * done from software on the PXA UDC, and the hardware
+		 * forgets to do it as part of SET_INTERFACE automagic.
+		 */
+		DMSG("only host can clear %s halt\n", _ep->name);
+		return -EROFS;
+	}
+
+	local_irq_save(flags);
+
+	if ((ep->bEndpointAddress & USB_DIR_IN) != 0
+			&& ((*ep->reg_udccs & UDCCS_BI_TFS) == 0
+			   || !list_empty(&ep->queue))) {
+		local_irq_restore(flags);
+		return -EAGAIN;
+	}
+
+	/* FST bit is the same for control, bulk in, bulk out, interrupt in */
+	*ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF;
+
+	/* ep0 needs special care */
+	if (!ep->desc) {
+		start_watchdog(ep->dev);
+		ep->dev->req_pending = 0;
+		ep->dev->ep0state = EP0_STALL;
+
+	/* and bulk/intr endpoints like dropping stalls too */
+	} else {
+		unsigned i;
+		for (i = 0; i < 1000; i += 20) {
+			if (*ep->reg_udccs & UDCCS_BI_SST)
+				break;
+			udelay(20);
+		}
+	}
+	local_irq_restore(flags);
+
+	DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
+	return 0;
+}
+
+static int pxa25x_ep_fifo_status(struct usb_ep *_ep)
+{
+	struct pxa25x_ep        *ep;
+
+	ep = container_of(_ep, struct pxa25x_ep, ep);
+	if (!_ep) {
+		DMSG("%s, bad ep\n", __func__);
+		return -ENODEV;
+	}
+	/* pxa can't report unclaimed bytes from IN fifos */
+	if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
+		return -EOPNOTSUPP;
+	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
+			|| (*ep->reg_udccs & UDCCS_BO_RFS) == 0)
+		return 0;
+	else
+		return (*ep->reg_ubcr & 0xfff) + 1;
+}
+
+static void pxa25x_ep_fifo_flush(struct usb_ep *_ep)
+{
+	struct pxa25x_ep        *ep;
+
+	ep = container_of(_ep, struct pxa25x_ep, ep);
+	if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
+		DMSG("%s, bad ep\n", __func__);
+		return;
+	}
+
+	/* toggle and halt bits stay unchanged */
+
+	/* for OUT, just read and discard the FIFO contents. */
+	if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
+		while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0)
+			(void) *ep->reg_uddr;
+		return;
+	}
+
+	/* most IN status is the same, but ISO can't stall */
+	*ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
+		| (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
+			? 0 : UDCCS_BI_SST);
+}
+
+
+static struct usb_ep_ops pxa25x_ep_ops = {
+	.enable		= pxa25x_ep_enable,
+	.disable	= pxa25x_ep_disable,
+
+	.alloc_request	= pxa25x_ep_alloc_request,
+	.free_request	= pxa25x_ep_free_request,
+
+	.queue		= pxa25x_ep_queue,
+	.dequeue	= pxa25x_ep_dequeue,
+
+	.set_halt	= pxa25x_ep_set_halt,
+	.fifo_status	= pxa25x_ep_fifo_status,
+	.fifo_flush	= pxa25x_ep_fifo_flush,
+};
+
+
+/* ---------------------------------------------------------------------------
+ *	device-scoped parts of the api to the usb controller hardware
+ * ---------------------------------------------------------------------------
+ */
+
+static int pxa25x_udc_get_frame(struct usb_gadget *_gadget)
+{
+	return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
+}
+
+static int pxa25x_udc_wakeup(struct usb_gadget *_gadget)
+{
+	/* host may not have enabled remote wakeup */
+	if ((UDCCS0 & UDCCS0_DRWF) == 0)
+		return -EHOSTUNREACH;
+	udc_set_mask_UDCCR(UDCCR_RSM);
+	return 0;
+}
+
+static void stop_activity(struct pxa25x_udc *, struct usb_gadget_driver *);
+static void udc_enable (struct pxa25x_udc *);
+static void udc_disable(struct pxa25x_udc *);
+
+/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
+ * in active use.
+ */
+static int pullup(struct pxa25x_udc *udc)
+{
+	int is_active = udc->vbus && udc->pullup && !udc->suspended;
+	DMSG("%s\n", is_active ? "active" : "inactive");
+	if (is_active) {
+		if (!udc->active) {
+			udc->active = 1;
+			/* Enable clock for USB device */
+			clk_enable(udc->clk);
+			udc_enable(udc);
+		}
+	} else {
+		if (udc->active) {
+			if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+				DMSG("disconnect %s\n", udc->driver
+					? udc->driver->driver.name
+					: "(no driver)");
+				stop_activity(udc, udc->driver);
+			}
+			udc_disable(udc);
+			/* Disable clock for USB device */
+			clk_disable(udc->clk);
+			udc->active = 0;
+		}
+
+	}
+	return 0;
+}
+
+/* VBUS reporting logically comes from a transceiver */
+static int pxa25x_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+	struct pxa25x_udc	*udc;
+
+	udc = container_of(_gadget, struct pxa25x_udc, gadget);
+	udc->vbus = is_active;
+	DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
+	pullup(udc);
+	return 0;
+}
+
+/* drivers may have software control over D+ pullup */
+static int pxa25x_udc_pullup(struct usb_gadget *_gadget, int is_active)
+{
+	struct pxa25x_udc	*udc;
+
+	udc = container_of(_gadget, struct pxa25x_udc, gadget);
+
+	/* not all boards support pullup control */
+	if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
+		return -EOPNOTSUPP;
+
+	udc->pullup = (is_active != 0);
+	pullup(udc);
+	return 0;
+}
+
+/* boards may consume current from VBUS, up to 100-500mA based on config.
+ * the 500uA suspend ceiling means that exclusively vbus-powered PXA designs
+ * violate USB specs.
+ */
+static int pxa25x_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+	struct pxa25x_udc	*udc;
+
+	udc = container_of(_gadget, struct pxa25x_udc, gadget);
+
+	if (udc->transceiver)
+		return usb_phy_set_power(udc->transceiver, mA);
+	return -EOPNOTSUPP;
+}
+
+static int pxa25x_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *));
+static int pxa25x_stop(struct usb_gadget_driver *driver);
+
+static const struct usb_gadget_ops pxa25x_udc_ops = {
+	.get_frame	= pxa25x_udc_get_frame,
+	.wakeup		= pxa25x_udc_wakeup,
+	.vbus_session	= pxa25x_udc_vbus_session,
+	.pullup		= pxa25x_udc_pullup,
+	.vbus_draw	= pxa25x_udc_vbus_draw,
+	.start		= pxa25x_start,
+	.stop		= pxa25x_stop,
+};
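+
+/*
+ * Hedged note: gadget drivers do not call these ops directly; they go
+ * through the inline wrappers in <linux/usb/gadget.h>, for example:
+ *
+ *	usb_gadget_vbus_draw(gadget, 100);	// -> pxa25x_udc_vbus_draw()
+ *	usb_gadget_disconnect(gadget);		// -> pxa25x_udc_pullup(g, 0)
+ *	usb_gadget_wakeup(gadget);		// -> pxa25x_udc_wakeup()
+ */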
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+
+static int
+udc_seq_show(struct seq_file *m, void *_d)
+{
+	struct pxa25x_udc	*dev = m->private;
+	unsigned long		flags;
+	int			i;
+	u32			tmp;
+
+	local_irq_save(flags);
+
+	/* basic device status */
+	seq_printf(m, DRIVER_DESC "\n"
+		"%s version: %s\nGadget driver: %s\nHost %s\n\n",
+		driver_name, DRIVER_VERSION SIZE_STR "(pio)",
+		dev->driver ? dev->driver->driver.name : "(none)",
+		dev->gadget.speed == USB_SPEED_FULL ? "full speed" : "disconnected");
+
+	/* registers for device and ep0 */
+	seq_printf(m,
+		"uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
+		UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
+
+	tmp = UDCCR;
+	seq_printf(m,
+		"udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
+		(tmp & UDCCR_REM) ? " rem" : "",
+		(tmp & UDCCR_RSTIR) ? " rstir" : "",
+		(tmp & UDCCR_SRM) ? " srm" : "",
+		(tmp & UDCCR_SUSIR) ? " susir" : "",
+		(tmp & UDCCR_RESIR) ? " resir" : "",
+		(tmp & UDCCR_RSM) ? " rsm" : "",
+		(tmp & UDCCR_UDA) ? " uda" : "",
+		(tmp & UDCCR_UDE) ? " ude" : "");
+
+	tmp = UDCCS0;
+	seq_printf(m,
+		"udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
+		(tmp & UDCCS0_SA) ? " sa" : "",
+		(tmp & UDCCS0_RNE) ? " rne" : "",
+		(tmp & UDCCS0_FST) ? " fst" : "",
+		(tmp & UDCCS0_SST) ? " sst" : "",
+		(tmp & UDCCS0_DRWF) ? " drwf" : "",
+		(tmp & UDCCS0_FTF) ? " ftf" : "",
+		(tmp & UDCCS0_IPR) ? " ipr" : "",
+		(tmp & UDCCS0_OPR) ? " opr" : "");
+
+	if (dev->has_cfr) {
+		tmp = UDCCFR;
+		seq_printf(m,
+			"udccfr %02X =%s%s\n", tmp,
+			(tmp & UDCCFR_AREN) ? " aren" : "",
+			(tmp & UDCCFR_ACM) ? " acm" : "");
+	}
+
+	if (dev->gadget.speed != USB_SPEED_FULL || !dev->driver)
+		goto done;
+
+	seq_printf(m, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
+		dev->stats.write.bytes, dev->stats.write.ops,
+		dev->stats.read.bytes, dev->stats.read.ops,
+		dev->stats.irqs);
+
+	/* dump endpoint queues */
+	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+		struct pxa25x_ep	*ep = &dev->ep [i];
+		struct pxa25x_request	*req;
+
+		if (i != 0) {
+			const struct usb_endpoint_descriptor	*desc;
+
+			desc = ep->desc;
+			if (!desc)
+				continue;
+			tmp = *dev->ep [i].reg_udccs;
+			seq_printf(m,
+				"%s max %d %s udccs %02x irqs %lu\n",
+				ep->ep.name, usb_endpoint_maxp(desc),
+				"pio", tmp, ep->pio_irqs);
+			/* TODO translate all five groups of udccs bits! */
+
+		} else /* ep0 should only have one transfer queued */
+			seq_printf(m, "ep0 max 16 pio irqs %lu\n",
+				ep->pio_irqs);
+
+		if (list_empty(&ep->queue)) {
+			seq_printf(m, "\t(nothing queued)\n");
+			continue;
+		}
+		list_for_each_entry(req, &ep->queue, queue) {
+			seq_printf(m,
+					"\treq %p len %d/%d buf %p\n",
+					&req->req, req->req.actual,
+					req->req.length, req->req.buf);
+		}
+	}
+
+done:
+	local_irq_restore(flags);
+	return 0;
+}
+
+static int
+udc_debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, udc_seq_show, inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+	.open		= udc_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.owner		= THIS_MODULE,
+};
+
+#define create_debug_files(dev) \
+	do { \
+		dev->debugfs_udc = debugfs_create_file(dev->gadget.name, \
+			S_IRUGO, NULL, dev, &debug_fops); \
+	} while (0)
+#define remove_debug_files(dev) \
+	do { \
+		if (dev->debugfs_udc) \
+			debugfs_remove(dev->debugfs_udc); \
+	} while (0)
+
+#else	/* !CONFIG_USB_GADGET_DEBUG_FS */
+
+#define create_debug_files(dev) do {} while (0)
+#define remove_debug_files(dev) do {} while (0)
+
+#endif	/* CONFIG_USB_GADGET_DEBUG_FS */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ *	udc_disable - disable USB device controller
+ */
+static void udc_disable(struct pxa25x_udc *dev)
+{
+	/* block all irqs */
+	udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
+	UICR0 = UICR1 = 0xff;
+	UFNRH = UFNRH_SIM;
+
+	/* if hardware supports it, disconnect from usb */
+	pullup_off();
+
+	udc_clear_mask_UDCCR(UDCCR_UDE);
+
+	ep0_idle (dev);
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+}
+
+
+/*
+ *	udc_reinit - initialize software state
+ */
+static void udc_reinit(struct pxa25x_udc *dev)
+{
+	u32	i;
+
+	/* device/ep0 records init */
+	INIT_LIST_HEAD (&dev->gadget.ep_list);
+	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
+	dev->ep0state = EP0_IDLE;
+
+	/* basic endpoint records init */
+	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+		struct pxa25x_ep *ep = &dev->ep[i];
+
+		if (i != 0)
+			list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
+
+		ep->desc = NULL;
+		ep->ep.desc = NULL;
+		ep->stopped = 0;
+		INIT_LIST_HEAD (&ep->queue);
+		ep->pio_irqs = 0;
+	}
+
+	/* the rest was statically initialized, and is read-only */
+}
+
+/* until it's enabled, this UDC should be completely invisible
+ * to any USB host.
+ */
+static void udc_enable (struct pxa25x_udc *dev)
+{
+	udc_clear_mask_UDCCR(UDCCR_UDE);
+
+	/* try to clear these bits before we enable the udc */
+	udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
+
+	ep0_idle(dev);
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+	dev->stats.irqs = 0;
+
+	/*
+	 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
+	 * - enable UDC
+	 * - if RESET is already in progress, ack interrupt
+	 * - unmask reset interrupt
+	 */
+	udc_set_mask_UDCCR(UDCCR_UDE);
+	if (!(UDCCR & UDCCR_UDA))
+		udc_ack_int_UDCCR(UDCCR_RSTIR);
+
+	if (dev->has_cfr /* UDC_RES2 is defined */) {
+		/* pxa255 (a0+) can avoid a set_config race that could
+		 * prevent gadget drivers from configuring correctly
+		 */
+		UDCCFR = UDCCFR_ACM | UDCCFR_MB1;
+	} else {
+		/* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
+		 * which could result in missing packets and interrupts.
+		 * supposedly one bit per endpoint, controlling whether it
+		 * double buffers or not; ACM/AREN bits fit into the holes.
+		 * zero bits (like USIR0_IRx) disable double buffering.
+		 */
+		UDC_RES1 = 0x00;
+		UDC_RES2 = 0x00;
+	}
+
+	/* enable suspend/resume and reset irqs */
+	udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);
+
+	/* enable ep0 irqs */
+	UICR0 &= ~UICR0_IM0;
+
+	/* if hardware supports it, pullup D+ and wait for reset */
+	pullup_on();
+}
+
+
+/* when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests.  then usb traffic follows until a
+ * disconnect is reported.  then a host may connect again, or
+ * the driver might get unbound.
+ */
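+/* This entry point is normally reached through usb_gadget_probe_driver()
+ * in the gadget core, when a gadget driver (g_ether, g_zero, and so on)
+ * registers itself.
+ */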
+static int pxa25x_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct pxa25x_udc	*dev = the_controller;
+	int			retval;
+
+	if (!driver
+			|| driver->max_speed < USB_SPEED_FULL
+			|| !bind
+			|| !driver->disconnect
+			|| !driver->setup)
+		return -EINVAL;
+	if (!dev)
+		return -ENODEV;
+	if (dev->driver)
+		return -EBUSY;
+
+	/* first hook up the driver ... */
+	dev->driver = driver;
+	dev->gadget.dev.driver = &driver->driver;
+	dev->pullup = 1;
+
+	retval = device_add (&dev->gadget.dev);
+	if (retval) {
+fail:
+		dev->driver = NULL;
+		dev->gadget.dev.driver = NULL;
+		return retval;
+	}
+	retval = bind(&dev->gadget);
+	if (retval) {
+		DMSG("bind to driver %s --> error %d\n",
+				driver->driver.name, retval);
+		device_del (&dev->gadget.dev);
+		goto fail;
+	}
+
+	/* ... then enable host detection and ep0; and we're ready
+	 * for set_configuration as well as eventual disconnect.
+	 */
+	DMSG("registered gadget driver '%s'\n", driver->driver.name);
+
+	/* connect to bus through transceiver */
+	if (dev->transceiver) {
+		retval = otg_set_peripheral(dev->transceiver->otg,
+						&dev->gadget);
+		if (retval) {
+			DMSG("can't bind to transceiver\n");
+			if (driver->unbind)
+				driver->unbind(&dev->gadget);
+			goto bind_fail;
+		}
+	}
+
+	pullup(dev);
+	dump_state(dev);
+	return 0;
+bind_fail:
+	return retval;
+}
+
+static void
+stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
+{
+	int i;
+
+	/* don't disconnect drivers more than once */
+	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+		driver = NULL;
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+	/* prevent new request submissions, kill any outstanding requests  */
+	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+		struct pxa25x_ep *ep = &dev->ep[i];
+
+		ep->stopped = 1;
+		nuke(ep, -ESHUTDOWN);
+	}
+	del_timer_sync(&dev->timer);
+
+	/* report disconnect; the driver is already quiesced */
+	if (driver)
+		driver->disconnect(&dev->gadget);
+
+	/* re-init driver-visible data structures */
+	udc_reinit(dev);
+}
+
+static int pxa25x_stop(struct usb_gadget_driver *driver)
+{
+	struct pxa25x_udc	*dev = the_controller;
+
+	if (!dev)
+		return -ENODEV;
+	if (!driver || driver != dev->driver || !driver->unbind)
+		return -EINVAL;
+
+	local_irq_disable();
+	dev->pullup = 0;
+	pullup(dev);
+	stop_activity(dev, driver);
+	local_irq_enable();
+
+	if (dev->transceiver)
+		(void) otg_set_peripheral(dev->transceiver->otg, NULL);
+
+	driver->unbind(&dev->gadget);
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+
+	device_del (&dev->gadget.dev);
+
+	DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
+	dump_state(dev);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_ARCH_LUBBOCK
+
+/* Lubbock has separate connect and disconnect irqs.  More typical designs
+ * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
+ */
+
+static irqreturn_t
+lubbock_vbus_irq(int irq, void *_dev)
+{
+	struct pxa25x_udc	*dev = _dev;
+	int			vbus;
+
+	dev->stats.irqs++;
+	switch (irq) {
+	case LUBBOCK_USB_IRQ:
+		vbus = 1;
+		disable_irq(LUBBOCK_USB_IRQ);
+		enable_irq(LUBBOCK_USB_DISC_IRQ);
+		break;
+	case LUBBOCK_USB_DISC_IRQ:
+		vbus = 0;
+		disable_irq(LUBBOCK_USB_DISC_IRQ);
+		enable_irq(LUBBOCK_USB_IRQ);
+		break;
+	default:
+		return IRQ_NONE;
+	}
+
+	pxa25x_udc_vbus_session(&dev->gadget, vbus);
+	return IRQ_HANDLED;
+}
+
+#endif
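+
+/* Note the ping-pong masking above: whichever IRQ fires is disabled and the
+ * complementary one is enabled, so connect and disconnect events alternate
+ * and each simply feeds a vbus level into pxa25x_udc_vbus_session().
+ */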
+
+
+/*-------------------------------------------------------------------------*/
+
+static inline void clear_ep_state (struct pxa25x_udc *dev)
+{
+	unsigned i;
+
+	/* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
+	 * fifos, and pending transactions mustn't be continued in any case.
+	 */
+	for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
+		nuke(&dev->ep[i], -ECONNABORTED);
+}
+
+static void udc_watchdog(unsigned long _dev)
+{
+	struct pxa25x_udc	*dev = (void *)_dev;
+
+	local_irq_disable();
+	if (dev->ep0state == EP0_STALL
+			&& (UDCCS0 & UDCCS0_FST) == 0
+			&& (UDCCS0 & UDCCS0_SST) == 0) {
+		UDCCS0 = UDCCS0_FST|UDCCS0_FTF;
+		DBG(DBG_VERBOSE, "ep0 re-stall\n");
+		start_watchdog(dev);
+	}
+	local_irq_enable();
+}
+
+static void handle_ep0 (struct pxa25x_udc *dev)
+{
+	u32			udccs0 = UDCCS0;
+	struct pxa25x_ep	*ep = &dev->ep [0];
+	struct pxa25x_request	*req;
+	union {
+		struct usb_ctrlrequest	r;
+		u8			raw [8];
+		u32			word [2];
+	} u;
+
+	if (list_empty(&ep->queue))
+		req = NULL;
+	else
+		req = list_entry(ep->queue.next, struct pxa25x_request, queue);
+
+	/* clear stall status */
+	if (udccs0 & UDCCS0_SST) {
+		nuke(ep, -EPIPE);
+		UDCCS0 = UDCCS0_SST;
+		del_timer(&dev->timer);
+		ep0_idle(dev);
+	}
+
+	/* previous request unfinished?  non-error iff back-to-back ... */
+	if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
+		nuke(ep, 0);
+		del_timer(&dev->timer);
+		ep0_idle(dev);
+	}
+
+	switch (dev->ep0state) {
+	case EP0_IDLE:
+		/* late-breaking status? */
+		udccs0 = UDCCS0;
+
+		/* start control request? */
+		if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
+				== (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
+			int i;
+
+			nuke (ep, -EPROTO);
+
+			/* read SETUP packet */
+			for (i = 0; i < 8; i++) {
+				if (unlikely(!(UDCCS0 & UDCCS0_RNE))) {
+bad_setup:
+					DMSG("SETUP %d!\n", i);
+					goto stall;
+				}
+				u.raw [i] = (u8) UDDR0;
+			}
+			if (unlikely((UDCCS0 & UDCCS0_RNE) != 0))
+				goto bad_setup;
+
+got_setup:
+			DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+				u.r.bRequestType, u.r.bRequest,
+				le16_to_cpu(u.r.wValue),
+				le16_to_cpu(u.r.wIndex),
+				le16_to_cpu(u.r.wLength));
+
+			/* cope with automagic for some standard requests. */
+			dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
+						== USB_TYPE_STANDARD;
+			dev->req_config = 0;
+			dev->req_pending = 1;
+			switch (u.r.bRequest) {
+			/* hardware restricts gadget drivers here! */
+			case USB_REQ_SET_CONFIGURATION:
+				if (u.r.bRequestType == USB_RECIP_DEVICE) {
+					/* reflect hardware's automagic
+					 * up to the gadget driver.
+					 */
+config_change:
+					dev->req_config = 1;
+					clear_ep_state(dev);
+					/* if !has_cfr, there's no synch
+					 * else use AREN (later) not SA|OPR
+					 * USIR0_IR0 acts edge sensitive
+					 */
+				}
+				break;
+			/* ... and here, even more ... */
+			case USB_REQ_SET_INTERFACE:
+				if (u.r.bRequestType == USB_RECIP_INTERFACE) {
+					/* udc hardware is broken by design:
+					 *  - altsetting may only be zero;
+					 *  - hw resets all interfaces' eps;
+					 *  - ep reset doesn't include halt(?).
+					 */
+					DMSG("broken set_interface (%d/%d)\n",
+						le16_to_cpu(u.r.wIndex),
+						le16_to_cpu(u.r.wValue));
+					goto config_change;
+				}
+				break;
+			/* hardware was supposed to hide this */
+			case USB_REQ_SET_ADDRESS:
+				if (u.r.bRequestType == USB_RECIP_DEVICE) {
+					ep0start(dev, 0, "address");
+					return;
+				}
+				break;
+			}
+
+			if (u.r.bRequestType & USB_DIR_IN)
+				dev->ep0state = EP0_IN_DATA_PHASE;
+			else
+				dev->ep0state = EP0_OUT_DATA_PHASE;
+
+			i = dev->driver->setup(&dev->gadget, &u.r);
+			if (i < 0) {
+				/* hardware automagic preventing STALL... */
+				if (dev->req_config) {
+					/* hardware sometimes neglects to tell
+					 * us about config change events, so
+					 * later ones may fail...
+					 */
+					WARNING("config change %02x fail %d?\n",
+						u.r.bRequest, i);
+					return;
+					/* TODO experiment:  if has_cfr,
+					 * hardware didn't ACK; maybe we
+					 * could actually STALL!
+					 */
+				}
+				DBG(DBG_VERBOSE, "protocol STALL, "
+					"%02x err %d\n", UDCCS0, i);
+stall:
+				/* the watchdog timer helps deal with cases
+				 * where udc seems to clear FST wrongly, and
+				 * then NAKs instead of STALLing.
+				 */
+				ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
+				start_watchdog(dev);
+				dev->ep0state = EP0_STALL;
+
+			/* deferred i/o == no response yet */
+			} else if (dev->req_pending) {
+				if (likely(dev->ep0state == EP0_IN_DATA_PHASE
+						|| dev->req_std || u.r.wLength))
+					ep0start(dev, 0, "defer");
+				else
+					ep0start(dev, UDCCS0_IPR, "defer/IPR");
+			}
+
+			/* expect at least one data or status stage irq */
+			return;
+
+		} else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
+				== (UDCCS0_OPR|UDCCS0_SA))) {
+			unsigned i;
+
+			/* pxa210/250 erratum 131 for B0/B1 says RNE lies.
+			 * still observed on a pxa255 a0.
+			 */
+			DBG(DBG_VERBOSE, "e131\n");
+			nuke(ep, -EPROTO);
+
+			/* read SETUP data, but don't trust it too much */
+			for (i = 0; i < 8; i++)
+				u.raw [i] = (u8) UDDR0;
+			if ((u.r.bRequestType & USB_RECIP_MASK)
+					> USB_RECIP_OTHER)
+				goto stall;
+			if (u.word [0] == 0 && u.word [1] == 0)
+				goto stall;
+			goto got_setup;
+		} else {
+			/* some random early IRQ:
+			 * - we acked FST
+			 * - IPR cleared
+			 * - OPR got set, without SA (likely status stage)
+			 */
+			UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR);
+		}
+		break;
+	case EP0_IN_DATA_PHASE:			/* GET_DESCRIPTOR etc */
+		if (udccs0 & UDCCS0_OPR) {
+			UDCCS0 = UDCCS0_OPR|UDCCS0_FTF;
+			DBG(DBG_VERBOSE, "ep0in premature status\n");
+			if (req)
+				done(ep, req, 0);
+			ep0_idle(dev);
+		} else /* irq was IPR clearing */ {
+			if (req) {
+				/* this IN packet might finish the request */
+				(void) write_ep0_fifo(ep, req);
+			} /* else IN token before response was written */
+		}
+		break;
+	case EP0_OUT_DATA_PHASE:		/* SET_DESCRIPTOR etc */
+		if (udccs0 & UDCCS0_OPR) {
+			if (req) {
+				/* this OUT packet might finish the request */
+				if (read_ep0_fifo(ep, req))
+					done(ep, req, 0);
+				/* else more OUT packets expected */
+			} /* else OUT token before read was issued */
+		} else /* irq was IPR clearing */ {
+			DBG(DBG_VERBOSE, "ep0out premature status\n");
+			if (req)
+				done(ep, req, 0);
+			ep0_idle(dev);
+		}
+		break;
+	case EP0_END_XFER:
+		if (req)
+			done(ep, req, 0);
+		/* ack control-IN status (maybe in-zlp was skipped)
+		 * also appears after some config change events.
+		 */
+		if (udccs0 & UDCCS0_OPR)
+			UDCCS0 = UDCCS0_OPR;
+		ep0_idle(dev);
+		break;
+	case EP0_STALL:
+		UDCCS0 = UDCCS0_FST;
+		break;
+	}
+	USIR0 = USIR0_IR0;
+}
+
+static void handle_ep(struct pxa25x_ep *ep)
+{
+	struct pxa25x_request	*req;
+	int			is_in = ep->bEndpointAddress & USB_DIR_IN;
+	int			completed;
+	u32			udccs, tmp;
+
+	do {
+		completed = 0;
+		if (likely (!list_empty(&ep->queue)))
+			req = list_entry(ep->queue.next,
+					struct pxa25x_request, queue);
+		else
+			req = NULL;
+
+		// TODO check FST handling
+
+		udccs = *ep->reg_udccs;
+		if (unlikely(is_in)) {	/* irq from TPC, SST, or (ISO) TUR */
+			tmp = UDCCS_BI_TUR;
+			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
+				tmp |= UDCCS_BI_SST;
+			tmp &= udccs;
+			if (likely (tmp))
+				*ep->reg_udccs = tmp;
+			if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
+				completed = write_fifo(ep, req);
+
+		} else {	/* irq from RPC (or for ISO, ROF) */
+			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
+				tmp = UDCCS_BO_SST | UDCCS_BO_DME;
+			else
+				tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
+			tmp &= udccs;
+			if (likely(tmp))
+				*ep->reg_udccs = tmp;
+
+			/* fifos can hold packets, ready for reading... */
+			if (likely(req)) {
+				completed = read_fifo(ep, req);
+			} else
+				pio_irq_disable (ep->bEndpointAddress);
+		}
+		ep->pio_irqs++;
+	} while (completed);
+}
+
+/*
+ *	pxa25x_udc_irq - interrupt handler
+ *
+ * avoid delays in ep0 processing. the control handshaking isn't always
+ * under software control (pxa250c0 and the pxa255 are better), and delays
+ * could cause usb protocol errors.
+ */
+static irqreturn_t
+pxa25x_udc_irq(int irq, void *_dev)
+{
+	struct pxa25x_udc	*dev = _dev;
+	int			handled;
+
+	dev->stats.irqs++;
+	do {
+		u32		udccr = UDCCR;
+
+		handled = 0;
+
+		/* SUSpend Interrupt Request */
+		if (unlikely(udccr & UDCCR_SUSIR)) {
+			udc_ack_int_UDCCR(UDCCR_SUSIR);
+			handled = 1;
+			DBG(DBG_VERBOSE, "USB suspend\n");
+
+			if (dev->gadget.speed != USB_SPEED_UNKNOWN
+					&& dev->driver
+					&& dev->driver->suspend)
+				dev->driver->suspend(&dev->gadget);
+			ep0_idle (dev);
+		}
+
+		/* RESume Interrupt Request */
+		if (unlikely(udccr & UDCCR_RESIR)) {
+			udc_ack_int_UDCCR(UDCCR_RESIR);
+			handled = 1;
+			DBG(DBG_VERBOSE, "USB resume\n");
+
+			if (dev->gadget.speed != USB_SPEED_UNKNOWN
+					&& dev->driver
+					&& dev->driver->resume)
+				dev->driver->resume(&dev->gadget);
+		}
+
+		/* ReSeT Interrupt Request - USB reset */
+		if (unlikely(udccr & UDCCR_RSTIR)) {
+			udc_ack_int_UDCCR(UDCCR_RSTIR);
+			handled = 1;
+
+			if ((UDCCR & UDCCR_UDA) == 0) {
+				DBG(DBG_VERBOSE, "USB reset start\n");
+
+				/* reset driver and endpoints,
+				 * in case that's not yet done
+				 */
+				stop_activity (dev, dev->driver);
+
+			} else {
+				DBG(DBG_VERBOSE, "USB reset end\n");
+				dev->gadget.speed = USB_SPEED_FULL;
+				memset(&dev->stats, 0, sizeof dev->stats);
+				/* driver and endpoints are still reset */
+			}
+
+		} else {
+			u32	usir0 = USIR0 & ~UICR0;
+			u32	usir1 = USIR1 & ~UICR1;
+			int	i;
+
+			if (unlikely (!usir0 && !usir1))
+				continue;
+
+			DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);
+
+			/* control traffic */
+			if (usir0 & USIR0_IR0) {
+				dev->ep[0].pio_irqs++;
+				handle_ep0(dev);
+				handled = 1;
+			}
+
+			/* endpoint data transfers */
+			for (i = 0; i < 8; i++) {
+				u32	tmp = 1 << i;
+
+				if (i && (usir0 & tmp)) {
+					handle_ep(&dev->ep[i]);
+					USIR0 |= tmp;
+					handled = 1;
+				}
+#ifndef	CONFIG_USB_PXA25X_SMALL
+				if (usir1 & tmp) {
+					handle_ep(&dev->ep[i+8]);
+					USIR1 |= tmp;
+					handled = 1;
+				}
+#endif
+			}
+		}
+
+		/* we could also ask for 1 msec SOF (SIR) interrupts */
+
+	} while (handled);
+	return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void nop_release (struct device *dev)
+{
+	DMSG("%s %s\n", __func__, dev_name(dev));
+}
+
+/* this uses load-time allocation and initialization (instead of
+ * doing it at run-time) to save code, eliminate fault paths, and
+ * be more obviously correct.
+ */
+static struct pxa25x_udc memory = {
+	.gadget = {
+		.ops		= &pxa25x_udc_ops,
+		.ep0		= &memory.ep[0].ep,
+		.name		= driver_name,
+		.dev = {
+			.init_name	= "gadget",
+			.release	= nop_release,
+		},
+	},
+
+	/* control endpoint */
+	.ep[0] = {
+		.ep = {
+			.name		= ep0name,
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= EP0_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.reg_udccs	= &UDCCS0,
+		.reg_uddr	= &UDDR0,
+	},
+
+	/* first group of endpoints */
+	.ep[1] = {
+		.ep = {
+			.name		= "ep1in-bulk",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= BULK_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= BULK_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 1,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+		.reg_udccs	= &UDCCS1,
+		.reg_uddr	= &UDDR1,
+	},
+	.ep[2] = {
+		.ep = {
+			.name		= "ep2out-bulk",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= BULK_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= BULK_FIFO_SIZE,
+		.bEndpointAddress = 2,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+		.reg_udccs	= &UDCCS2,
+		.reg_ubcr	= &UBCR2,
+		.reg_uddr	= &UDDR2,
+	},
+#ifndef CONFIG_USB_PXA25X_SMALL
+	.ep[3] = {
+		.ep = {
+			.name		= "ep3in-iso",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= ISO_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= ISO_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 3,
+		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
+		.reg_udccs	= &UDCCS3,
+		.reg_uddr	= &UDDR3,
+	},
+	.ep[4] = {
+		.ep = {
+			.name		= "ep4out-iso",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= ISO_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= ISO_FIFO_SIZE,
+		.bEndpointAddress = 4,
+		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
+		.reg_udccs	= &UDCCS4,
+		.reg_ubcr	= &UBCR4,
+		.reg_uddr	= &UDDR4,
+	},
+	.ep[5] = {
+		.ep = {
+			.name		= "ep5in-int",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= INT_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= INT_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 5,
+		.bmAttributes	= USB_ENDPOINT_XFER_INT,
+		.reg_udccs	= &UDCCS5,
+		.reg_uddr	= &UDDR5,
+	},
+
+	/* second group of endpoints */
+	.ep[6] = {
+		.ep = {
+			.name		= "ep6in-bulk",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= BULK_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= BULK_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 6,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+		.reg_udccs	= &UDCCS6,
+		.reg_uddr	= &UDDR6,
+	},
+	.ep[7] = {
+		.ep = {
+			.name		= "ep7out-bulk",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= BULK_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= BULK_FIFO_SIZE,
+		.bEndpointAddress = 7,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+		.reg_udccs	= &UDCCS7,
+		.reg_ubcr	= &UBCR7,
+		.reg_uddr	= &UDDR7,
+	},
+	.ep[8] = {
+		.ep = {
+			.name		= "ep8in-iso",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= ISO_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= ISO_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 8,
+		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
+		.reg_udccs	= &UDCCS8,
+		.reg_uddr	= &UDDR8,
+	},
+	.ep[9] = {
+		.ep = {
+			.name		= "ep9out-iso",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= ISO_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= ISO_FIFO_SIZE,
+		.bEndpointAddress = 9,
+		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
+		.reg_udccs	= &UDCCS9,
+		.reg_ubcr	= &UBCR9,
+		.reg_uddr	= &UDDR9,
+	},
+	.ep[10] = {
+		.ep = {
+			.name		= "ep10in-int",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= INT_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= INT_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 10,
+		.bmAttributes	= USB_ENDPOINT_XFER_INT,
+		.reg_udccs	= &UDCCS10,
+		.reg_uddr	= &UDDR10,
+	},
+
+	/* third group of endpoints */
+	.ep[11] = {
+		.ep = {
+			.name		= "ep11in-bulk",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= BULK_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= BULK_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 11,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+		.reg_udccs	= &UDCCS11,
+		.reg_uddr	= &UDDR11,
+	},
+	.ep[12] = {
+		.ep = {
+			.name		= "ep12out-bulk",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= BULK_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= BULK_FIFO_SIZE,
+		.bEndpointAddress = 12,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+		.reg_udccs	= &UDCCS12,
+		.reg_ubcr	= &UBCR12,
+		.reg_uddr	= &UDDR12,
+	},
+	.ep[13] = {
+		.ep = {
+			.name		= "ep13in-iso",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= ISO_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= ISO_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 13,
+		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
+		.reg_udccs	= &UDCCS13,
+		.reg_uddr	= &UDDR13,
+	},
+	.ep[14] = {
+		.ep = {
+			.name		= "ep14out-iso",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= ISO_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= ISO_FIFO_SIZE,
+		.bEndpointAddress = 14,
+		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
+		.reg_udccs	= &UDCCS14,
+		.reg_ubcr	= &UBCR14,
+		.reg_uddr	= &UDDR14,
+	},
+	.ep[15] = {
+		.ep = {
+			.name		= "ep15in-int",
+			.ops		= &pxa25x_ep_ops,
+			.maxpacket	= INT_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= INT_FIFO_SIZE,
+		.bEndpointAddress = USB_DIR_IN | 15,
+		.bmAttributes	= USB_ENDPOINT_XFER_INT,
+		.reg_udccs	= &UDCCS15,
+		.reg_uddr	= &UDDR15,
+	},
+#endif /* !CONFIG_USB_PXA25X_SMALL */
+};
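+
+/* The endpoint names above ("ep1in-bulk", "ep2out-bulk", ...) encode address,
+ * direction and transfer type; gadget drivers typically claim endpoints via
+ * usb_ep_autoconfig(), which is assumed to rely on these naming conventions
+ * and on the fifo sizes declared here.
+ */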
+
+#define CP15R0_VENDOR_MASK	0xffffe000
+
+#if	defined(CONFIG_ARCH_PXA)
+#define CP15R0_XSCALE_VALUE	0x69052000	/* intel/arm/xscale */
+
+#elif	defined(CONFIG_ARCH_IXP4XX)
+#define CP15R0_XSCALE_VALUE	0x69054000	/* intel/arm/ixp4xx */
+
+#endif
+
+#define CP15R0_PROD_MASK	0x000003f0
+#define PXA25x			0x00000100	/* and PXA26x */
+#define PXA210			0x00000120
+
+#define CP15R0_REV_MASK		0x0000000f
+
+#define CP15R0_PRODREV_MASK	(CP15R0_PROD_MASK | CP15R0_REV_MASK)
+
+#define PXA255_A0		0x00000106	/* or PXA260_B1 */
+#define PXA250_C0		0x00000105	/* or PXA26x_B0 */
+#define PXA250_B2		0x00000104
+#define PXA250_B1		0x00000103	/* or PXA260_A0 */
+#define PXA250_B0		0x00000102
+#define PXA250_A1		0x00000101
+#define PXA250_A0		0x00000100
+
+#define PXA210_C0		0x00000125
+#define PXA210_B2		0x00000124
+#define PXA210_B1		0x00000123
+#define PXA210_B0		0x00000122
+#define IXP425_A0		0x000001c1
+#define IXP425_B0		0x000001f1
+#define IXP465_AD		0x00000200
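+
+/* The probe routine below reads the CP15 main ID register via "mrc";
+ * CP15R0_VENDOR_MASK isolates the implementer/architecture field (to insist
+ * on XScale) and CP15R0_PRODREV_MASK the part number plus stepping, which is
+ * how the chip revisions listed above are told apart.
+ */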
+
+/*
+ *	probe - binds to the platform device
+ */
+static int __init pxa25x_udc_probe(struct platform_device *pdev)
+{
+	struct pxa25x_udc *dev = &memory;
+	int retval, irq;
+	u32 chiprev;
+
+	/* insist on Intel/ARM/XScale */
+	asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
+	if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
+		pr_err("%s: not XScale!\n", driver_name);
+		return -ENODEV;
+	}
+
+	/* trigger chiprev-specific logic */
+	switch (chiprev & CP15R0_PRODREV_MASK) {
+#if	defined(CONFIG_ARCH_PXA)
+	case PXA255_A0:
+		dev->has_cfr = 1;
+		break;
+	case PXA250_A0:
+	case PXA250_A1:
+		/* A0/A1 "not released"; ep 13, 15 unusable */
+		/* fall through */
+	case PXA250_B2: case PXA210_B2:
+	case PXA250_B1: case PXA210_B1:
+	case PXA250_B0: case PXA210_B0:
+		/* OUT-DMA is broken ... */
+		/* fall through */
+	case PXA250_C0: case PXA210_C0:
+		break;
+#elif	defined(CONFIG_ARCH_IXP4XX)
+	case IXP425_A0:
+	case IXP425_B0:
+	case IXP465_AD:
+		dev->has_cfr = 1;
+		break;
+#endif
+	default:
+		pr_err("%s: unrecognized processor: %08x\n",
+			driver_name, chiprev);
+		/* iop3xx, ixp4xx, ... */
+		return -ENODEV;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return -ENODEV;
+
+	dev->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(dev->clk)) {
+		retval = PTR_ERR(dev->clk);
+		goto err_clk;
+	}
+
+	pr_debug("%s: IRQ %d%s%s\n", driver_name, irq,
+		dev->has_cfr ? "" : " (!cfr)",
+		SIZE_STR "(pio)"
+		);
+
+	/* other non-static parts of init */
+	dev->dev = &pdev->dev;
+	dev->mach = pdev->dev.platform_data;
+
+	dev->transceiver = usb_get_transceiver();
+
+	if (gpio_is_valid(dev->mach->gpio_pullup)) {
+		if ((retval = gpio_request(dev->mach->gpio_pullup,
+				"pca25x_udc GPIO PULLUP"))) {
+			dev_dbg(&pdev->dev,
+				"can't get pullup gpio %d, err: %d\n",
+				dev->mach->gpio_pullup, retval);
+			goto err_gpio_pullup;
+		}
+		gpio_direction_output(dev->mach->gpio_pullup, 0);
+	}
+
+	init_timer(&dev->timer);
+	dev->timer.function = udc_watchdog;
+	dev->timer.data = (unsigned long) dev;
+
+	device_initialize(&dev->gadget.dev);
+	dev->gadget.dev.parent = &pdev->dev;
+	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+
+	the_controller = dev;
+	platform_set_drvdata(pdev, dev);
+
+	udc_disable(dev);
+	udc_reinit(dev);
+
+	dev->vbus = 0;
+
+	/* irq setup after old hardware state is cleaned up */
+	retval = request_irq(irq, pxa25x_udc_irq,
+			0, driver_name, dev);
+	if (retval != 0) {
+		pr_err("%s: can't get irq %d, err %d\n",
+			driver_name, irq, retval);
+		goto err_irq1;
+	}
+	dev->got_irq = 1;
+
+#ifdef CONFIG_ARCH_LUBBOCK
+	if (machine_is_lubbock()) {
+		retval = request_irq(LUBBOCK_USB_DISC_IRQ,
+				lubbock_vbus_irq,
+				IRQF_SAMPLE_RANDOM,
+				driver_name, dev);
+		if (retval != 0) {
+			pr_err("%s: can't get irq %i, err %d\n",
+				driver_name, LUBBOCK_USB_DISC_IRQ, retval);
+			goto err_irq_lub;
+		}
+		retval = request_irq(LUBBOCK_USB_IRQ,
+				lubbock_vbus_irq,
+				IRQF_SAMPLE_RANDOM,
+				driver_name, dev);
+		if (retval != 0) {
+			pr_err("%s: can't get irq %i, err %d\n",
+				driver_name, LUBBOCK_USB_IRQ, retval);
+			goto lubbock_fail0;
+		}
+	} else
+#endif
+	create_debug_files(dev);
+
+	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
+	if (!retval)
+		return retval;
+
+	remove_debug_files(dev);
+#ifdef	CONFIG_ARCH_LUBBOCK
+lubbock_fail0:
+	free_irq(LUBBOCK_USB_DISC_IRQ, dev);
+ err_irq_lub:
+	free_irq(irq, dev);
+#endif
+ err_irq1:
+	if (gpio_is_valid(dev->mach->gpio_pullup))
+		gpio_free(dev->mach->gpio_pullup);
+ err_gpio_pullup:
+	if (dev->transceiver) {
+		usb_put_transceiver(dev->transceiver);
+		dev->transceiver = NULL;
+	}
+	clk_put(dev->clk);
+ err_clk:
+	return retval;
+}
+
+static void pxa25x_udc_shutdown(struct platform_device *_dev)
+{
+	pullup_off();
+}
+
+static int __exit pxa25x_udc_remove(struct platform_device *pdev)
+{
+	struct pxa25x_udc *dev = platform_get_drvdata(pdev);
+
+	usb_del_gadget_udc(&dev->gadget);
+	if (dev->driver)
+		return -EBUSY;
+
+	dev->pullup = 0;
+	pullup(dev);
+
+	remove_debug_files(dev);
+
+	if (dev->got_irq) {
+		free_irq(platform_get_irq(pdev, 0), dev);
+		dev->got_irq = 0;
+	}
+#ifdef CONFIG_ARCH_LUBBOCK
+	if (machine_is_lubbock()) {
+		free_irq(LUBBOCK_USB_DISC_IRQ, dev);
+		free_irq(LUBBOCK_USB_IRQ, dev);
+	}
+#endif
+	if (gpio_is_valid(dev->mach->gpio_pullup))
+		gpio_free(dev->mach->gpio_pullup);
+
+	clk_put(dev->clk);
+
+	if (dev->transceiver) {
+		usb_put_transceiver(dev->transceiver);
+		dev->transceiver = NULL;
+	}
+
+	platform_set_drvdata(pdev, NULL);
+	the_controller = NULL;
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef	CONFIG_PM
+
+/* USB suspend (controlled by the host) and system suspend (controlled
+ * by the PXA) don't necessarily work well together.  If USB is active,
+ * the 48 MHz clock is required; so the system can't enter 33 MHz idle
+ * mode, or any deeper PM saving state.
+ *
+ * For now, we punt and forcibly disconnect from the USB host when PXA
+ * enters any suspend state.  While we're disconnected, we always disable
+ * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
+ * Boards without software pullup control shouldn't use those states.
+ * VBUS IRQs should probably be ignored so that the PXA device just acts
+ * "dead" to USB hosts until system resume.
+ */
+static int pxa25x_udc_suspend(struct platform_device *dev, pm_message_t state)
+{
+	struct pxa25x_udc	*udc = platform_get_drvdata(dev);
+	unsigned long flags;
+
+	if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
+		WARNING("USB host won't detect disconnect!\n");
+	udc->suspended = 1;
+
+	local_irq_save(flags);
+	pullup(udc);
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+static int pxa25x_udc_resume(struct platform_device *dev)
+{
+	struct pxa25x_udc	*udc = platform_get_drvdata(dev);
+	unsigned long flags;
+
+	udc->suspended = 0;
+	local_irq_save(flags);
+	pullup(udc);
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+#else
+#define	pxa25x_udc_suspend	NULL
+#define	pxa25x_udc_resume	NULL
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static struct platform_driver udc_driver = {
+	.shutdown	= pxa25x_udc_shutdown,
+	.remove		= __exit_p(pxa25x_udc_remove),
+	.suspend	= pxa25x_udc_suspend,
+	.resume		= pxa25x_udc_resume,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "pxa25x-udc",
+	},
+};
+
+static int __init udc_init(void)
+{
+	pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);
+	return platform_driver_probe(&udc_driver, pxa25x_udc_probe);
+}
+module_init(udc_init);
+
+static void __exit udc_exit(void)
+{
+	platform_driver_unregister(&udc_driver);
+}
+module_exit(udc_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa25x-udc");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pxa25x_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pxa25x_udc.h
new file mode 100644
index 0000000..893e917
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pxa25x_udc.h
@@ -0,0 +1,252 @@
+/*
+ * Intel PXA25x on-chip full speed USB device controller
+ *
+ * Copyright (C) 2003 Robert Schwebel <r.schwebel@pengutronix.de>, Pengutronix
+ * Copyright (C) 2003 David Brownell
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_USB_GADGET_PXA25X_H
+#define __LINUX_USB_GADGET_PXA25X_H
+
+#include <linux/types.h>
+
+/*-------------------------------------------------------------------------*/
+
+/* pxa25x has this (move to include/asm-arm/arch-pxa/pxa-regs.h) */
+#define UFNRH_SIR	(1 << 7)	/* SOF interrupt request */
+#define UFNRH_SIM	(1 << 6)	/* SOF interrupt mask */
+#define UFNRH_IPE14	(1 << 5)	/* ISO packet error, ep14 */
+#define UFNRH_IPE9	(1 << 4)	/* ISO packet error, ep9 */
+#define UFNRH_IPE4	(1 << 3)	/* ISO packet error, ep4 */
+
+/* pxa255 has this (move to include/asm-arm/arch-pxa/pxa-regs.h) */
+#define	UDCCFR		UDC_RES2	/* UDC Control Function Register */
+#define UDCCFR_AREN	(1 << 7)	/* ACK response enable (now) */
+#define UDCCFR_ACM	(1 << 2)	/* ACK control mode (wait for AREN) */
+
+/* latest pxa255 errata define new "must be one" bits in UDCCFR */
+#define	UDCCFR_MB1	(0xff & ~(UDCCFR_AREN|UDCCFR_ACM))
+
+/*-------------------------------------------------------------------------*/
+
+struct pxa25x_udc;
+
+struct pxa25x_ep {
+	struct usb_ep				ep;
+	struct pxa25x_udc			*dev;
+
+	const struct usb_endpoint_descriptor	*desc;
+	struct list_head			queue;
+	unsigned long				pio_irqs;
+
+	unsigned short				fifo_size;
+	u8					bEndpointAddress;
+	u8					bmAttributes;
+
+	unsigned				stopped : 1;
+	unsigned				dma_fixup : 1;
+
+	/* UDCCS = UDC Control/Status for this EP
+	 * UBCR = UDC Byte Count Remaining (contents of OUT fifo)
+	 * UDDR = UDC Endpoint Data Register (the fifo)
+	 * DRCM = DMA Request Channel Map
+	 */
+	volatile u32				*reg_udccs;
+	volatile u32				*reg_ubcr;
+	volatile u32				*reg_uddr;
+};
+
+struct pxa25x_request {
+	struct usb_request			req;
+	struct list_head			queue;
+};
+
+enum ep0_state {
+	EP0_IDLE,
+	EP0_IN_DATA_PHASE,
+	EP0_OUT_DATA_PHASE,
+	EP0_END_XFER,
+	EP0_STALL,
+};
+
+#define EP0_FIFO_SIZE	((unsigned)16)
+#define BULK_FIFO_SIZE	((unsigned)64)
+#define ISO_FIFO_SIZE	((unsigned)256)
+#define INT_FIFO_SIZE	((unsigned)8)
+
+struct udc_stats {
+	struct ep0stats {
+		unsigned long		ops;
+		unsigned long		bytes;
+	} read, write;
+	unsigned long			irqs;
+};
+
+#ifdef CONFIG_USB_PXA25X_SMALL
+/* when memory's tight, SMALL config saves code+data.  */
+#define	PXA_UDC_NUM_ENDPOINTS	3
+#endif
+
+#ifndef	PXA_UDC_NUM_ENDPOINTS
+#define	PXA_UDC_NUM_ENDPOINTS	16
+#endif
+
+struct pxa25x_udc {
+	struct usb_gadget			gadget;
+	struct usb_gadget_driver		*driver;
+
+	enum ep0_state				ep0state;
+	struct udc_stats			stats;
+	unsigned				got_irq : 1,
+						vbus : 1,
+						pullup : 1,
+						has_cfr : 1,
+						req_pending : 1,
+						req_std : 1,
+						req_config : 1,
+						suspended : 1,
+						active : 1;
+
+#define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200))
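+	/* HZ/200 is roughly 5 ms; with HZ below 200 the division rounds down
+	 * to 0 and the watchdog simply fires on the next timer tick.
+	 */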
+	struct timer_list			timer;
+
+	struct device				*dev;
+	struct clk				*clk;
+	struct pxa2xx_udc_mach_info		*mach;
+	struct usb_phy				*transceiver;
+	u64					dma_mask;
+	struct pxa25x_ep			ep [PXA_UDC_NUM_ENDPOINTS];
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+	struct dentry				*debugfs_udc;
+#endif
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_ARCH_LUBBOCK
+#include <mach/lubbock.h>
+/* lubbock can also report usb connect/disconnect irqs */
+#endif
+
+static struct pxa25x_udc *the_controller;
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Debugging support vanishes in non-debug builds.  DBG_NORMAL should be
+ * mostly silent during normal use/testing, with no timing side-effects.
+ */
+#define DBG_NORMAL	1	/* error paths, device state transitions */
+#define DBG_VERBOSE	2	/* add some success path trace info */
+#define DBG_NOISY	3	/* ... even more: request level */
+#define DBG_VERY_NOISY	4	/* ... even more: packet level */
+
+#define DMSG(stuff...)	pr_debug("udc: " stuff)
+
+#ifdef DEBUG
+
+static const char *state_name[] = {
+	"EP0_IDLE",
+	"EP0_IN_DATA_PHASE", "EP0_OUT_DATA_PHASE",
+	"EP0_END_XFER", "EP0_STALL"
+};
+
+#ifdef VERBOSE_DEBUG
+#    define UDC_DEBUG DBG_VERBOSE
+#else
+#    define UDC_DEBUG DBG_NORMAL
+#endif
+
+static void __maybe_unused
+dump_udccr(const char *label)
+{
+	u32	udccr = UDCCR;
+	DMSG("%s %02X =%s%s%s%s%s%s%s%s\n",
+		label, udccr,
+		(udccr & UDCCR_REM) ? " rem" : "",
+		(udccr & UDCCR_RSTIR) ? " rstir" : "",
+		(udccr & UDCCR_SRM) ? " srm" : "",
+		(udccr & UDCCR_SUSIR) ? " susir" : "",
+		(udccr & UDCCR_RESIR) ? " resir" : "",
+		(udccr & UDCCR_RSM) ? " rsm" : "",
+		(udccr & UDCCR_UDA) ? " uda" : "",
+		(udccr & UDCCR_UDE) ? " ude" : "");
+}
+
+static void __maybe_unused
+dump_udccs0(const char *label)
+{
+	u32		udccs0 = UDCCS0;
+
+	DMSG("%s %s %02X =%s%s%s%s%s%s%s%s\n",
+		label, state_name[the_controller->ep0state], udccs0,
+		(udccs0 & UDCCS0_SA) ? " sa" : "",
+		(udccs0 & UDCCS0_RNE) ? " rne" : "",
+		(udccs0 & UDCCS0_FST) ? " fst" : "",
+		(udccs0 & UDCCS0_SST) ? " sst" : "",
+		(udccs0 & UDCCS0_DRWF) ? " drwf" : "",
+		(udccs0 & UDCCS0_FTF) ? " ftf" : "",
+		(udccs0 & UDCCS0_IPR) ? " ipr" : "",
+		(udccs0 & UDCCS0_OPR) ? " opr" : "");
+}
+
+static void __maybe_unused
+dump_state(struct pxa25x_udc *dev)
+{
+	u32		tmp;
+	unsigned	i;
+
+	DMSG("%s, uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
+		state_name[dev->ep0state],
+		UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
+	dump_udccr("udccr");
+	if (dev->has_cfr) {
+		tmp = UDCCFR;
+		DMSG("udccfr %02X =%s%s\n", tmp,
+			(tmp & UDCCFR_AREN) ? " aren" : "",
+			(tmp & UDCCFR_ACM) ? " acm" : "");
+	}
+
+	if (!dev->driver) {
+		DMSG("no gadget driver bound\n");
+		return;
+	} else
+		DMSG("ep0 driver '%s'\n", dev->driver->driver.name);
+
+	dump_udccs0 ("udccs0");
+	DMSG("ep0 IN %lu/%lu, OUT %lu/%lu\n",
+		dev->stats.write.bytes, dev->stats.write.ops,
+		dev->stats.read.bytes, dev->stats.read.ops);
+
+	for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+		if (dev->ep [i].desc == NULL)
+			continue;
+		DMSG ("udccs%d = %02x\n", i, *dev->ep->reg_udccs);
+	}
+}
+
+#else
+
+#define	dump_udccr(x)	do{}while(0)
+#define	dump_udccs0(x)	do{}while(0)
+#define	dump_state(x)	do{}while(0)
+
+#define UDC_DEBUG ((unsigned)0)
+
+#endif
+
+#define DBG(lvl, stuff...) do{if ((lvl) <= UDC_DEBUG) DMSG(stuff);}while(0)
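+/* For example, DBG(DBG_NOISY, "queued req %p\n", req) only produces output
+ * when UDC_DEBUG (selected above from DEBUG/VERBOSE_DEBUG) is at least
+ * DBG_NOISY; at lower debug levels the constant comparison lets the compiler
+ * discard the call.
+ */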
+
+#define ERR(stuff...)		pr_err("udc: " stuff)
+#define WARNING(stuff...)	pr_warning("udc: " stuff)
+#define INFO(stuff...)		pr_info("udc: " stuff)
+
+
+#endif /* __LINUX_USB_GADGET_PXA25X_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pxa27x_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pxa27x_udc.c
new file mode 100644
index 0000000..98acb3a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pxa27x_udc.c
@@ -0,0 +1,2679 @@
+/*
+ * Handles the Intel 27x USB Device Controller (UDC)
+ *
+ * Inspired by original driver by Frank Becker, David Brownell, and others.
+ * Copyright (C) 2008 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+
+#include <asm/byteorder.h>
+#include <mach/hardware.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <mach/udc.h>
+
+#include "pxa27x_udc.h"
+
+/*
+ * This driver handles the USB Device Controller (UDC) in Intel's PXA 27x
+ * series processors.
+ *
+ * Such controller drivers work with a gadget driver.  The gadget driver
+ * returns descriptors, implements configuration and data protocols used
+ * by the host to interact with this device, and allocates endpoints to
+ * the different protocol interfaces.  The controller driver virtualizes
+ * usb hardware so that the gadget drivers will be more portable.
+ *
+ * This UDC hardware wants to implement a bit too much USB protocol. The
+ * biggest issues are:  that the endpoints have to be set up before the
+ * controller can be enabled (minor, and not uncommon); and each endpoint
+ * can only have one configuration, interface and alternative interface
+ * number (major, and very unusual). Once set up, these cannot be changed
+ * without a controller reset.
+ *
+ * The workaround is to set up all combinations necessary for the gadgets that
+ * will work with this driver. This is done statically in the pxa_udc structure.
+ * See pxa_udc, udc_usb_ep versus pxa_ep, and matching function find_pxa_ep.
+ * (You could modify this if needed.  Some drivers have a "fifo_mode" module
+ * parameter to facilitate such changes.)
+ *
+ * The combinations have been tested with these gadgets :
+ *  - zero gadget
+ *  - file storage gadget
+ *  - ether gadget
+ *
+ * The driver doesn't use DMA, only IO access and IRQ callbacks. No use is
+ * made of UDC's double buffering either. USB "On-The-Go" is not implemented.
+ *
+ * All the requests are handled the same way:
+ *  - the driver tries to handle the request directly via fifo IO
+ *  - if the IO fifo is not big enough, the remainder is sent/received
+ *    from the interrupt handler.
+ */
+
+#define	DRIVER_VERSION	"2008-04-18"
+#define	DRIVER_DESC	"PXA 27x USB Device Controller driver"
+
+static const char driver_name[] = "pxa27x_udc";
+static struct pxa_udc *the_controller;
+
+static void handle_ep(struct pxa_ep *ep);
+
+/*
+ * Debug filesystem
+ */
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+
+static int state_dbg_show(struct seq_file *s, void *p)
+{
+	struct pxa_udc *udc = s->private;
+	int pos = 0, ret;
+	u32 tmp;
+
+	ret = -ENODEV;
+	if (!udc->driver)
+		goto out;
+
+	/* basic device status */
+	pos += seq_printf(s, DRIVER_DESC "\n"
+			 "%s version: %s\nGadget driver: %s\n",
+			 driver_name, DRIVER_VERSION,
+			 udc->driver ? udc->driver->driver.name : "(none)");
+
+	tmp = udc_readl(udc, UDCCR);
+	pos += seq_printf(s,
+			 "udccr=0x%0x(%s%s%s%s%s%s%s%s%s%s), "
+			 "con=%d,inter=%d,altinter=%d\n", tmp,
+			 (tmp & UDCCR_OEN) ? " oen":"",
+			 (tmp & UDCCR_AALTHNP) ? " aalthnp":"",
+			 (tmp & UDCCR_AHNP) ? " ahnp" : "",
+			 (tmp & UDCCR_BHNP) ? " bhnp" : "",
+			 (tmp & UDCCR_DWRE) ? " dwre" : "",
+			 (tmp & UDCCR_SMAC) ? " smac" : "",
+			 (tmp & UDCCR_EMCE) ? " emce" : "",
+			 (tmp & UDCCR_UDR) ? " udr" : "",
+			 (tmp & UDCCR_UDA) ? " uda" : "",
+			 (tmp & UDCCR_UDE) ? " ude" : "",
+			 (tmp & UDCCR_ACN) >> UDCCR_ACN_S,
+			 (tmp & UDCCR_AIN) >> UDCCR_AIN_S,
+			 (tmp & UDCCR_AAISN) >> UDCCR_AAISN_S);
+	/* registers for device and ep0 */
+	pos += seq_printf(s, "udcicr0=0x%08x udcicr1=0x%08x\n",
+			udc_readl(udc, UDCICR0), udc_readl(udc, UDCICR1));
+	pos += seq_printf(s, "udcisr0=0x%08x udcisr1=0x%08x\n",
+			udc_readl(udc, UDCISR0), udc_readl(udc, UDCISR1));
+	pos += seq_printf(s, "udcfnr=%d\n", udc_readl(udc, UDCFNR));
+	pos += seq_printf(s, "irqs: reset=%lu, suspend=%lu, resume=%lu, "
+			"reconfig=%lu\n",
+			udc->stats.irqs_reset, udc->stats.irqs_suspend,
+			udc->stats.irqs_resume, udc->stats.irqs_reconfig);
+
+	ret = 0;
+out:
+	return ret;
+}
+
+static int queues_dbg_show(struct seq_file *s, void *p)
+{
+	struct pxa_udc *udc = s->private;
+	struct pxa_ep *ep;
+	struct pxa27x_request *req;
+	int pos = 0, i, maxpkt, ret;
+
+	ret = -ENODEV;
+	if (!udc->driver)
+		goto out;
+
+	/* dump endpoint queues */
+	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
+		ep = &udc->pxa_ep[i];
+		maxpkt = ep->fifo_size;
+		pos += seq_printf(s,  "%-12s max_pkt=%d %s\n",
+				EPNAME(ep), maxpkt, "pio");
+
+		if (list_empty(&ep->queue)) {
+			pos += seq_printf(s, "\t(nothing queued)\n");
+			continue;
+		}
+
+		list_for_each_entry(req, &ep->queue, queue) {
+			pos += seq_printf(s,  "\treq %p len %d/%d buf %p\n",
+					&req->req, req->req.actual,
+					req->req.length, req->req.buf);
+		}
+	}
+
+	ret = 0;
+out:
+	return ret;
+}
+
+static int eps_dbg_show(struct seq_file *s, void *p)
+{
+	struct pxa_udc *udc = s->private;
+	struct pxa_ep *ep;
+	int pos = 0, i, ret;
+	u32 tmp;
+
+	ret = -ENODEV;
+	if (!udc->driver)
+		goto out;
+
+	ep = &udc->pxa_ep[0];
+	tmp = udc_ep_readl(ep, UDCCSR);
+	pos += seq_printf(s, "udccsr0=0x%03x(%s%s%s%s%s%s%s)\n", tmp,
+			 (tmp & UDCCSR0_SA) ? " sa" : "",
+			 (tmp & UDCCSR0_RNE) ? " rne" : "",
+			 (tmp & UDCCSR0_FST) ? " fst" : "",
+			 (tmp & UDCCSR0_SST) ? " sst" : "",
+			 (tmp & UDCCSR0_DME) ? " dme" : "",
+			 (tmp & UDCCSR0_IPR) ? " ipr" : "",
+			 (tmp & UDCCSR0_OPC) ? " opc" : "");
+	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
+		ep = &udc->pxa_ep[i];
+		tmp = i? udc_ep_readl(ep, UDCCR) : udc_readl(udc, UDCCR);
+		pos += seq_printf(s, "%-12s: "
+				"IN %lu(%lu reqs), OUT %lu(%lu reqs), "
+				"irqs=%lu, udccr=0x%08x, udccsr=0x%03x, "
+				"udcbcr=%d\n",
+				EPNAME(ep),
+				ep->stats.in_bytes, ep->stats.in_ops,
+				ep->stats.out_bytes, ep->stats.out_ops,
+				ep->stats.irqs,
+				tmp, udc_ep_readl(ep, UDCCSR),
+				udc_ep_readl(ep, UDCBCR));
+	}
+
+	ret = 0;
+out:
+	return ret;
+}
+
+static int eps_dbg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, eps_dbg_show, inode->i_private);
+}
+
+static int queues_dbg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, queues_dbg_show, inode->i_private);
+}
+
+static int state_dbg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, state_dbg_show, inode->i_private);
+}
+
+static const struct file_operations state_dbg_fops = {
+	.owner		= THIS_MODULE,
+	.open		= state_dbg_open,
+	.llseek		= seq_lseek,
+	.read		= seq_read,
+	.release	= single_release,
+};
+
+static const struct file_operations queues_dbg_fops = {
+	.owner		= THIS_MODULE,
+	.open		= queues_dbg_open,
+	.llseek		= seq_lseek,
+	.read		= seq_read,
+	.release	= single_release,
+};
+
+static const struct file_operations eps_dbg_fops = {
+	.owner		= THIS_MODULE,
+	.open		= eps_dbg_open,
+	.llseek		= seq_lseek,
+	.read		= seq_read,
+	.release	= single_release,
+};
+
+static void pxa_init_debugfs(struct pxa_udc *udc)
+{
+	struct dentry *root, *state, *queues, *eps;
+
+	root = debugfs_create_dir(udc->gadget.name, NULL);
+	if (IS_ERR(root) || !root)
+		goto err_root;
+
+	state = debugfs_create_file("udcstate", 0400, root, udc,
+			&state_dbg_fops);
+	if (!state)
+		goto err_state;
+	queues = debugfs_create_file("queues", 0400, root, udc,
+			&queues_dbg_fops);
+	if (!queues)
+		goto err_queues;
+	eps = debugfs_create_file("epstate", 0400, root, udc,
+			&eps_dbg_fops);
+	if (!eps)
+		goto err_eps;
+
+	udc->debugfs_root = root;
+	udc->debugfs_state = state;
+	udc->debugfs_queues = queues;
+	udc->debugfs_eps = eps;
+	return;
+err_eps:
+	debugfs_remove(eps);
+err_queues:
+	debugfs_remove(queues);
+err_state:
+	debugfs_remove(root);
+err_root:
+	dev_err(udc->dev, "debugfs is not available\n");
+}
+
+static void pxa_cleanup_debugfs(struct pxa_udc *udc)
+{
+	debugfs_remove(udc->debugfs_eps);
+	debugfs_remove(udc->debugfs_queues);
+	debugfs_remove(udc->debugfs_state);
+	debugfs_remove(udc->debugfs_root);
+	udc->debugfs_eps = NULL;
+	udc->debugfs_queues = NULL;
+	udc->debugfs_state = NULL;
+	udc->debugfs_root = NULL;
+}
+
+#else
+static inline void pxa_init_debugfs(struct pxa_udc *udc)
+{
+}
+
+static inline void pxa_cleanup_debugfs(struct pxa_udc *udc)
+{
+}
+#endif
+
+/**
+ * is_match_usb_pxa - check if usb_ep and pxa_ep match
+ * @udc_usb_ep: usb endpoint
+ * @ep: pxa endpoint
+ * @config: configuration required in pxa_ep
+ * @interface: interface required in pxa_ep
+ * @altsetting: altsetting required in pxa_ep
+ *
+ * Returns 1 if all criteria match between pxa and usb endpoint, 0 otherwise
+ */
+static int is_match_usb_pxa(struct udc_usb_ep *udc_usb_ep, struct pxa_ep *ep,
+		int config, int interface, int altsetting)
+{
+	if (usb_endpoint_num(&udc_usb_ep->desc) != ep->addr)
+		return 0;
+	if (usb_endpoint_dir_in(&udc_usb_ep->desc) != ep->dir_in)
+		return 0;
+	if (usb_endpoint_type(&udc_usb_ep->desc) != ep->type)
+		return 0;
+	if ((ep->config != config) || (ep->interface != interface)
+			|| (ep->alternate != altsetting))
+		return 0;
+	return 1;
+}
+
+/**
+ * find_pxa_ep - find pxa_ep structure matching udc_usb_ep
+ * @udc: pxa udc
+ * @udc_usb_ep: udc_usb_ep structure
+ *
+ * Match udc_usb_ep and all pxa_ep available, to see if one matches.
+ * This is necessary because of the strong pxa hardware restriction: once pxa
+ * endpoints are initialized, their configuration is frozen, and no change can
+ * be made to their address, direction, or to the configuration, interface or
+ * altsetting in which they are active ... which differs from more usual
+ * models, where endpoints are roughly just addressable fifos and configuration
+ * events (like all control messages) are left to gadget drivers.
+ *
+ * Note that there is still a blurred point here:
+ *   - we rely on the UDCCR register's "active interface" and "active
+ *     altsetting" fields.  This makes little sense with regard to the USB
+ *     spec, where multiple interfaces can be active at the same time.
+ *   - if we knew for sure that the pxa can handle multiple interfaces at the
+ *     same time, assuming Intel's Developer Guide is wrong, this function
+ *     should be reviewed, and a cache of (iface, altsetting) pairs should be
+ *     kept in the pxa_udc structure. This function would then match against
+ *     that cache instead of the "last altsetting" that was set up.
+ *
+ * Returns the matched pxa_ep structure or NULL if none found
+ */
+static struct pxa_ep *find_pxa_ep(struct pxa_udc *udc,
+		struct udc_usb_ep *udc_usb_ep)
+{
+	int i;
+	struct pxa_ep *ep;
+	int cfg = udc->config;
+	int iface = udc->last_interface;
+	int alt = udc->last_alternate;
+
+	if (udc_usb_ep == &udc->udc_usb_ep[0])
+		return &udc->pxa_ep[0];
+
+	for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
+		ep = &udc->pxa_ep[i];
+		if (is_match_usb_pxa(udc_usb_ep, ep, cfg, iface, alt))
+			return ep;
+	}
+	return NULL;
+}
+
+/**
+ * update_pxa_ep_matches - update pxa_ep cached values in all udc_usb_ep
+ * @udc: pxa udc
+ *
+ * Context: in_interrupt()
+ *
+ * Updates the pxa_ep field in each udc_usb_ep structure, if that field was
+ * previously set up (and is not NULL). The update is necessary if a
+ * configuration change or altsetting change was issued by the USB host.
+ */
+static void update_pxa_ep_matches(struct pxa_udc *udc)
+{
+	int i;
+	struct udc_usb_ep *udc_usb_ep;
+
+	for (i = 1; i < NR_USB_ENDPOINTS; i++) {
+		udc_usb_ep = &udc->udc_usb_ep[i];
+		if (udc_usb_ep->pxa_ep)
+			udc_usb_ep->pxa_ep = find_pxa_ep(udc, udc_usb_ep);
+	}
+}
+
+/**
+ * pio_irq_enable - Enables irq generation for one endpoint
+ * @ep: udc endpoint
+ */
+static void pio_irq_enable(struct pxa_ep *ep)
+{
+	struct pxa_udc *udc = ep->dev;
+	int index = EPIDX(ep);
+	u32 udcicr0 = udc_readl(udc, UDCICR0);
+	u32 udcicr1 = udc_readl(udc, UDCICR1);
+
+	if (index < 16)
+		udc_writel(udc, UDCICR0, udcicr0 | (3 << (index * 2)));
+	else
+		udc_writel(udc, UDCICR1, udcicr1 | (3 << ((index - 16) * 2)));
+}
+
+/**
+ * pio_irq_disable - Disables irq generation for one endpoint
+ * @ep: udc endpoint
+ */
+static void pio_irq_disable(struct pxa_ep *ep)
+{
+	struct pxa_udc *udc = ep->dev;
+	int index = EPIDX(ep);
+	u32 udcicr0 = udc_readl(udc, UDCICR0);
+	u32 udcicr1 = udc_readl(udc, UDCICR1);
+
+	if (index < 16)
+		udc_writel(udc, UDCICR0, udcicr0 & ~(3 << (index * 2)));
+	else
+		udc_writel(udc, UDCICR1, udcicr1 & ~(3 << ((index - 16) * 2)));
+}
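+
+/* Each endpoint owns two adjacent enable bits in UDCICR0/UDCICR1, hence the
+ * "3 << (index * 2)" masks above; endpoints 0..15 live in UDCICR0 and the
+ * rest in UDCICR1.  The two bits are assumed to be the packet-complete and
+ * fifo-error interrupt enables for that endpoint.
+ */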
+
+/**
+ * udc_set_mask_UDCCR - set bits in UDCCR
+ * @udc: udc device
+ * @mask: bits to set in UDCCR
+ *
+ * Sets bits in UDCCR, leaving DME and FST bits as they were.
+ */
+static inline void udc_set_mask_UDCCR(struct pxa_udc *udc, int mask)
+{
+	u32 udccr = udc_readl(udc, UDCCR);
+	udc_writel(udc, UDCCR,
+			(udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
+}
+
+/**
+ * udc_clear_mask_UDCCR - clears bits in UDCCR
+ * @udc: udc device
+ * @mask: bit to clear in UDCCR
+ *
+ * Clears bits in UDCCR, leaving DME and FST bits as they were.
+ */
+static inline void udc_clear_mask_UDCCR(struct pxa_udc *udc, int mask)
+{
+	u32 udccr = udc_readl(udc, UDCCR);
+	udc_writel(udc, UDCCR,
+			(udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
+}
+
+/**
+ * ep_write_UDCCSR - set bits in UDCCSR
+ * @ep: udc endpoint
+ * @mask: bits to set in UDCCSR
+ *
+ * Sets bits in UDCCSR (UDCCSR0 and UDCCSR*).
+ *
+ * A special case applies to ep0: the ACM bit is always set to 1, for
+ * SET_INTERFACE and SET_CONFIGURATION.
+ */
+static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask)
+{
+	if (is_ep0(ep))
+		mask |= UDCCSR0_ACM;
+	udc_ep_writel(ep, UDCCSR, mask);
+}
+
+/**
+ * ep_count_bytes_remain - get how many bytes remain in the udc endpoint fifo
+ * @ep: udc endpoint
+ *
+ * Returns number of bytes in OUT fifos. Broken for IN fifos (-EOPNOTSUPP)
+ */
+static int ep_count_bytes_remain(struct pxa_ep *ep)
+{
+	if (ep->dir_in)
+		return -EOPNOTSUPP;
+	return udc_ep_readl(ep, UDCBCR) & 0x3ff;
+}
+
+/**
+ * ep_is_empty - checks whether an endpoint fifo is empty
+ * @ep: udc endpoint
+ *
+ * If the endpoint is the control endpoint, checks whether there are bytes in
+ * the control endpoint fifo. If it is a data endpoint, checks whether bytes
+ * are ready for reading on an OUT endpoint.
+ *
+ * Returns 0 if ep not empty, 1 if ep empty, -EOPNOTSUPP if IN endpoint
+ */
+static int ep_is_empty(struct pxa_ep *ep)
+{
+	int ret;
+
+	if (!is_ep0(ep) && ep->dir_in)
+		return -EOPNOTSUPP;
+	if (is_ep0(ep))
+		ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR0_RNE);
+	else
+		ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNE);
+	return ret;
+}
+
+/**
+ * ep_is_full - checks if ep has room to write bytes
+ * @ep: udc endpoint
+ *
+ * If the endpoint is not the control endpoint and is an IN endpoint, checks
+ * whether there is room to write bytes into the endpoint fifo.
+ *
+ * Returns 0 if ep not full, 1 if ep full, -EOPNOTSUPP if OUT endpoint
+ */
+static int ep_is_full(struct pxa_ep *ep)
+{
+	if (is_ep0(ep))
+		return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_IPR);
+	if (!ep->dir_in)
+		return -EOPNOTSUPP;
+	return (!(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNF));
+}
+
+/**
+ * epout_has_pkt - checks if OUT endpoint fifo has a packet available
+ * @ep: pxa endpoint
+ *
+ * Returns 1 if a complete packet is available, 0 if not, -EOPNOTSUPP for IN ep.
+ */
+static int epout_has_pkt(struct pxa_ep *ep)
+{
+	if (!is_ep0(ep) && ep->dir_in)
+		return -EOPNOTSUPP;
+	if (is_ep0(ep))
+		return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_OPC);
+	return (udc_ep_readl(ep, UDCCSR) & UDCCSR_PC);
+}
+
+/**
+ * set_ep0state - Set ep0 automata state
+ * @udc: udc device
+ * @state: state
+ */
+static void set_ep0state(struct pxa_udc *udc, int state)
+{
+	struct pxa_ep *ep = &udc->pxa_ep[0];
+	char *old_stname = EP0_STNAME(udc);
+
+	udc->ep0state = state;
+	ep_dbg(ep, "state=%s->%s, udccsr0=0x%03x, udcbcr=%d\n", old_stname,
+		EP0_STNAME(udc), udc_ep_readl(ep, UDCCSR),
+		udc_ep_readl(ep, UDCBCR));
+}
+
+/**
+ * ep0_idle - Put control endpoint into idle state
+ * @dev: udc device
+ */
+static void ep0_idle(struct pxa_udc *dev)
+{
+	set_ep0state(dev, WAIT_FOR_SETUP);
+}
+
+/**
+ * inc_ep_stats_reqs - Update ep stats counts
+ * @ep: physical endpoint
+ * @is_in: ep direction (USB_DIR_IN or 0)
+ */
+static void inc_ep_stats_reqs(struct pxa_ep *ep, int is_in)
+{
+	if (is_in)
+		ep->stats.in_ops++;
+	else
+		ep->stats.out_ops++;
+}
+
+/**
+ * inc_ep_stats_bytes - Update ep stats counts
+ * @ep: physical endpoint
+ * @count: bytes transferred on endpoint
+ * @is_in: ep direction (USB_DIR_IN or 0)
+ */
+static void inc_ep_stats_bytes(struct pxa_ep *ep, int count, int is_in)
+{
+	if (is_in)
+		ep->stats.in_bytes += count;
+	else
+		ep->stats.out_bytes += count;
+}
+
+/**
+ * pxa_ep_setup - Sets up a usb physical endpoint
+ * @ep: pxa27x physical endpoint
+ *
+ * Find the physical pxa27x ep, and set up its UDCCR
+ */
+static __init void pxa_ep_setup(struct pxa_ep *ep)
+{
+	u32 new_udccr;
+
+	new_udccr = ((ep->config << UDCCONR_CN_S) & UDCCONR_CN)
+		| ((ep->interface << UDCCONR_IN_S) & UDCCONR_IN)
+		| ((ep->alternate << UDCCONR_AISN_S) & UDCCONR_AISN)
+		| ((EPADDR(ep) << UDCCONR_EN_S) & UDCCONR_EN)
+		| ((EPXFERTYPE(ep) << UDCCONR_ET_S) & UDCCONR_ET)
+		| ((ep->dir_in) ? UDCCONR_ED : 0)
+		| ((ep->fifo_size << UDCCONR_MPS_S) & UDCCONR_MPS)
+		| UDCCONR_EE;
+
+	udc_ep_writel(ep, UDCCR, new_udccr);
+}
+
+/**
+ * pxa_eps_setup - Sets up all usb physical endpoints
+ * @dev: udc device
+ *
+ * Set up all pxa physical endpoints, except ep0
+ */
+static __init void pxa_eps_setup(struct pxa_udc *dev)
+{
+	unsigned int i;
+
+	dev_dbg(dev->dev, "%s: dev=%p\n", __func__, dev);
+
+	for (i = 1; i < NR_PXA_ENDPOINTS; i++)
+		pxa_ep_setup(&dev->pxa_ep[i]);
+}
+
+/**
+ * pxa_ep_alloc_request - Allocate usb request
+ * @_ep: usb endpoint
+ * @gfp_flags: memory allocation flags for the request allocation
+ *
+ * For the pxa27x, these can just wrap kmalloc/kfree.  gadget drivers
+ * must still pass correctly initialized endpoints, since other controller
+ * drivers may care about how it's currently set up (dma issues etc).
+ */
+static struct usb_request *
+pxa_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	struct pxa27x_request *req;
+
+	req = kzalloc(sizeof *req, gfp_flags);
+	if (!req)
+		return NULL;
+
+	INIT_LIST_HEAD(&req->queue);
+	req->in_use = 0;
+	req->udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+
+	return &req->req;
+}
+
+/**
+ * pxa_ep_free_request - Free usb request
+ * @_ep: usb endpoint
+ * @_req: usb request
+ *
+ * Wrapper around kfree to free _req
+ */
+static void pxa_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct pxa27x_request *req;
+
+	req = container_of(_req, struct pxa27x_request, req);
+	WARN_ON(!list_empty(&req->queue));
+	kfree(req);
+}
+
+/**
+ * ep_add_request - add a request to the endpoint's queue
+ * @ep: usb endpoint
+ * @req: usb request
+ *
+ * Context: ep->lock held
+ *
+ * Queues the request in the endpoint's queue, and enables the interrupts
+ * on the endpoint.
+ */
+static void ep_add_request(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+	if (unlikely(!req))
+		return;
+	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
+		req->req.length, udc_ep_readl(ep, UDCCSR));
+
+	req->in_use = 1;
+	list_add_tail(&req->queue, &ep->queue);
+	pio_irq_enable(ep);
+}
+
+/**
+ * ep_del_request - removes a request from the endpoint's queue
+ * @ep: usb endpoint
+ * @req: usb request
+ *
+ * Context: ep->lock held
+ *
+ * Unqueue the request from the endpoint's queue. If there are no more requests
+ * on the endpoint, and if it's not the control endpoint, interrupts are
+ * disabled on the endpoint.
+ */
+static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+	if (unlikely(!req))
+		return;
+	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
+		req->req.length, udc_ep_readl(ep, UDCCSR));
+
+	list_del_init(&req->queue);
+	req->in_use = 0;
+	if (!is_ep0(ep) && list_empty(&ep->queue))
+		pio_irq_disable(ep);
+}
+
+/**
+ * req_done - Complete a usb request
+ * @ep: pxa physical endpoint
+ * @req: pxa request
+ * @status: usb request status sent to gadget API
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
+ *
+ * Context: ep->lock held if flags not NULL, else ep->lock released
+ *
+ * Retire a pxa27x usb request. Endpoint must be locked.
+ */
+static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
+	unsigned long *pflags)
+{
+	unsigned long	flags;
+
+	ep_del_request(ep, req);
+	if (likely(req->req.status == -EINPROGRESS))
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	if (status && status != -ESHUTDOWN)
+		ep_dbg(ep, "complete req %p stat %d len %u/%u\n",
+			&req->req, status,
+			req->req.actual, req->req.length);
+
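+	/*
+	 * Drop the endpoint lock (if held) around the completion callback:
+	 * complete() may re-enter the driver through pxa_ep_queue(), which
+	 * takes ep->lock again.
+	 */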
+	if (pflags)
+		spin_unlock_irqrestore(&ep->lock, *pflags);
+	local_irq_save(flags);
+	req->req.complete(&req->udc_usb_ep->usb_ep, &req->req);
+	local_irq_restore(flags);
+	if (pflags)
+		spin_lock_irqsave(&ep->lock, *pflags);
+}
+
+/**
+ * ep_end_out_req - Ends endpoint OUT request
+ * @ep: physical endpoint
+ * @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
+ *
+ * Context: ep->lock held or released (see req_done())
+ *
+ * Ends endpoint OUT request (completes usb request).
+ */
+static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
+	unsigned long *pflags)
+{
+	inc_ep_stats_reqs(ep, !USB_DIR_IN);
+	req_done(ep, req, 0, pflags);
+}
+
+/**
+ * ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
+ * @ep: physical endpoint
+ * @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
+ *
+ * Context: ep->lock held or released (see req_done())
+ *
+ * Ends control endpoint OUT request (completes usb request), and puts
+ * control endpoint into idle state
+ */
+static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
+	unsigned long *pflags)
+{
+	set_ep0state(ep->dev, OUT_STATUS_STAGE);
+	ep_end_out_req(ep, req, pflags);
+	ep0_idle(ep->dev);
+}
+
+/**
+ * ep_end_in_req - Ends endpoint IN request
+ * @ep: physical endpoint
+ * @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
+ *
+ * Context: ep->lock held or released (see req_done())
+ *
+ * Ends endpoint IN request (completes usb request).
+ */
+static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
+	unsigned long *pflags)
+{
+	inc_ep_stats_reqs(ep, USB_DIR_IN);
+	req_done(ep, req, 0, pflags);
+}
+
+/**
+ * ep0_end_in_req - Ends control endpoint IN request (ends data stage)
+ * @ep: physical endpoint
+ * @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
+ *
+ * Context: ep->lock held or released (see req_done())
+ *
+ * Ends control endpoint IN request (completes usb request), and puts
+ * control endpoint into status state
+ */
+static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
+	unsigned long *pflags)
+{
+	set_ep0state(ep->dev, IN_STATUS_STAGE);
+	ep_end_in_req(ep, req, pflags);
+}
+
+/**
+ * nuke - Dequeue all requests
+ * @ep: pxa endpoint
+ * @status: usb request status
+ *
+ * Context: ep->lock released
+ *
+ * Dequeues all requests on an endpoint. As a side effect, interrupts will be
+ * disabled on that endpoint (because no more requests).
+ */
+static void nuke(struct pxa_ep *ep, int status)
+{
+	struct pxa27x_request	*req;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&ep->lock, flags);
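+	/* req_done() temporarily drops ep->lock around each completion */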
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
+		req_done(ep, req, status, &flags);
+	}
+	spin_unlock_irqrestore(&ep->lock, flags);
+}
+
+/**
+ * read_packet - transfer 1 packet from an OUT endpoint into request
+ * @ep: pxa physical endpoint
+ * @req: usb request
+ *
+ * Takes bytes from the OUT endpoint and transfers them into the usb request.
+ * If there is less space in request than bytes received in OUT endpoint,
+ * bytes are left in the OUT endpoint.
+ *
+ * Returns how many bytes were actually transferred
+ */
+static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+	u32 *buf;
+	int bytes_ep, bufferspace, count, i;
+
+	bytes_ep = ep_count_bytes_remain(ep);
+	bufferspace = req->req.length - req->req.actual;
+
+	buf = (u32 *)(req->req.buf + req->req.actual);
+	prefetchw(buf);
+
+	if (likely(!ep_is_empty(ep)))
+		count = min(bytes_ep, bufferspace);
+	else /* zlp */
+		count = 0;
+
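+	/* drain the fifo 32 bits at a time into the request buffer */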
+	for (i = count; i > 0; i -= 4)
+		*buf++ = udc_ep_readl(ep, UDCDR);
+	req->req.actual += count;
+
+	ep_write_UDCCSR(ep, UDCCSR_PC);
+
+	return count;
+}
+
+/**
+ * write_packet - transfer 1 packet from request into an IN endpoint
+ * @ep: pxa physical endpoint
+ * @req: usb request
+ * @max: max bytes that fit into endpoint
+ *
+ * Takes bytes from usb request, and transfers them into the physical
+ * endpoint. If there are no bytes to transfer, doesn't write anything
+ * to physical endpoint.
+ *
+ * Returns how many bytes were actually transferred.
+ */
+static int write_packet(struct pxa_ep *ep, struct pxa27x_request *req,
+			unsigned int max)
+{
+	int length, count, remain, i;
+	u32 *buf;
+	u8 *buf_8;
+
+	buf = (u32 *)(req->req.buf + req->req.actual);
+	prefetch(buf);
+
+	length = min(req->req.length - req->req.actual, max);
+	req->req.actual += length;
+
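+	/* split the transfer: whole 32-bit words first, then trailing bytes */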
+	remain = length & 0x3;
+	count = length & ~(0x3);
+	for (i = count; i > 0 ; i -= 4)
+		udc_ep_writel(ep, UDCDR, *buf++);
+
+	buf_8 = (u8 *)buf;
+	for (i = remain; i > 0; i--)
+		udc_ep_writeb(ep, UDCDR, *buf_8++);
+
+	ep_vdbg(ep, "length=%d+%d, udccsr=0x%03x\n", count, remain,
+		udc_ep_readl(ep, UDCCSR));
+
+	return length;
+}
+
+/**
+ * read_fifo - Transfer packets from OUT endpoint into usb request
+ * @ep: pxa physical endpoint
+ * @req: usb request
+ *
+ * Context: callable when in_interrupt()
+ *
+ * Unload as many packets as possible from the fifo we use for usb OUT
+ * transfers and put them into the request. Caller should have made sure
+ * there's at least one packet ready.
+ * Doesn't complete the request, that's the caller's job
+ *
+ * Returns 1 if the request completed, 0 otherwise
+ */
+static int read_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+	int count, is_short, completed = 0;
+
+	while (epout_has_pkt(ep)) {
+		count = read_packet(ep, req);
+		inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
+
+		is_short = (count < ep->fifo_size);
+		ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
+			udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
+			&req->req, req->req.actual, req->req.length);
+
+		/* completion */
+		if (is_short || req->req.actual == req->req.length) {
+			completed = 1;
+			break;
+		}
+		/* finished that packet.  the next one may be waiting... */
+	}
+	return completed;
+}
+
+/**
+ * write_fifo - transfer packets from usb request into an IN endpoint
+ * @ep: pxa physical endpoint
+ * @req: pxa usb request
+ *
+ * Write to an IN endpoint fifo, as many packets as possible.
+ * irqs will use this to write the rest later.
+ * caller guarantees at least one packet buffer is ready (or a zlp).
+ * Doesn't complete the request, that's the caller's job
+ *
+ * Returns 1 if request fully transferred, 0 if partial transfer
+ */
+static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+	unsigned max;
+	int count, is_short, is_last = 0, completed = 0, totcount = 0;
+	u32 udccsr;
+
+	max = ep->fifo_size;
+	do {
+		is_short = 0;
+
+		udccsr = udc_ep_readl(ep, UDCCSR);
+		if (udccsr & UDCCSR_PC) {
+			ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n",
+				udccsr);
+			ep_write_UDCCSR(ep, UDCCSR_PC);
+		}
+		if (udccsr & UDCCSR_TRN) {
+			ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n",
+				udccsr);
+			ep_write_UDCCSR(ep, UDCCSR_TRN);
+		}
+
+		count = write_packet(ep, req, max);
+		inc_ep_stats_bytes(ep, count, USB_DIR_IN);
+		totcount += count;
+
+		/* last packet is usually short (or a zlp) */
+		if (unlikely(count < max)) {
+			is_last = 1;
+			is_short = 1;
+		} else {
+			if (likely(req->req.length > req->req.actual)
+					|| req->req.zero)
+				is_last = 0;
+			else
+				is_last = 1;
+			/* interrupt/iso maxpacket may not fill the fifo */
+			is_short = unlikely(max < ep->fifo_size);
+		}
+
+		if (is_short)
+			ep_write_UDCCSR(ep, UDCCSR_SP);
+
+		/* requests complete when all IN data is in the FIFO */
+		if (is_last) {
+			completed = 1;
+			break;
+		}
+	} while (!ep_is_full(ep));
+
+	ep_dbg(ep, "wrote count:%d bytes%s%s, left:%d req=%p\n",
+			totcount, is_last ? "/L" : "", is_short ? "/S" : "",
+			req->req.length - req->req.actual, &req->req);
+
+	return completed;
+}
+
+/**
+ * read_ep0_fifo - Transfer packets from control endpoint into usb request
+ * @ep: control endpoint
+ * @req: pxa usb request
+ *
+ * Special ep0 version of the above read_fifo. Reads as many bytes from control
+ * endpoint as can be read, and stores them into usb request (limited by request
+ * maximum length).
+ *
+ * Returns 0 if usb request only partially filled, 1 if fully filled
+ */
+static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+	int count, is_short, completed = 0;
+
+	while (epout_has_pkt(ep)) {
+		count = read_packet(ep, req);
+		ep_write_UDCCSR(ep, UDCCSR0_OPC);
+		inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
+
+		is_short = (count < ep->fifo_size);
+		ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
+			udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
+			&req->req, req->req.actual, req->req.length);
+
+		if (is_short || req->req.actual >= req->req.length) {
+			completed = 1;
+			break;
+		}
+	}
+
+	return completed;
+}
+
+/**
+ * write_ep0_fifo - Send a request to control endpoint (ep0 in)
+ * @ep: control endpoint
+ * @req: request
+ *
+ * Context: callable when in_interrupt()
+ *
+ * Sends a request (or a part of the request) to the control endpoint (ep0 in).
+ * If the request doesn't fit, the remaining part will be sent from irq.
+ * The request is considered fully written only if either :
+ *   - last write transferred all remaining bytes, but fifo was not fully filled
+ *   - last write was a 0 length write
+ *
+ * Returns 1 if request fully written, 0 if request only partially sent
+ */
+static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+	unsigned	count;
+	int		is_last, is_short;
+
+	count = write_packet(ep, req, EP0_FIFO_SIZE);
+	inc_ep_stats_bytes(ep, count, USB_DIR_IN);
+
+	is_short = (count < EP0_FIFO_SIZE);
+	is_last = ((count == 0) || (count < EP0_FIFO_SIZE));
+
+	/* Sends either a short packet or a 0 length packet */
+	if (unlikely(is_short))
+		ep_write_UDCCSR(ep, UDCCSR0_IPR);
+
+	ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n",
+		count, is_short ? "/S" : "", is_last ? "/L" : "",
+		req->req.length - req->req.actual,
+		&req->req, udc_ep_readl(ep, UDCCSR));
+
+	return is_last;
+}
+
+/**
+ * pxa_ep_queue - Queue a request into a usb endpoint
+ * @_ep: usb endpoint
+ * @_req: usb request
+ * @gfp_flags: flags
+ *
+ * Context: normally called when !in_interrupt, but callable when in_interrupt()
+ * in the special case of ep0 setup :
+ *   (irq->handle_ep0_ctrl_req->gadget_setup->pxa_ep_queue)
+ *
+ * Returns 0 if succeeded, error otherwise
+ */
+static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+			gfp_t gfp_flags)
+{
+	struct udc_usb_ep	*udc_usb_ep;
+	struct pxa_ep		*ep;
+	struct pxa27x_request	*req;
+	struct pxa_udc		*dev;
+	unsigned long		flags;
+	int			rc = 0;
+	int			is_first_req;
+	unsigned		length;
+	int			recursion_detected;
+
+	req = container_of(_req, struct pxa27x_request, req);
+	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+
+	if (unlikely(!_req || !_req->complete || !_req->buf))
+		return -EINVAL;
+
+	if (unlikely(!_ep))
+		return -EINVAL;
+
+	dev = udc_usb_ep->dev;
+	ep = udc_usb_ep->pxa_ep;
+	if (unlikely(!ep))
+		return -EINVAL;
+
+	dev = ep->dev;
+	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
+		ep_dbg(ep, "bogus device state\n");
+		return -ESHUTDOWN;
+	}
+
+	/* iso is always one packet per request, that's the only way
+	 * we can report per-packet status.  that also helps with dma.
+	 */
+	if (unlikely(EPXFERTYPE_is_ISO(ep)
+			&& req->req.length > ep->fifo_size))
+		return -EMSGSIZE;
+
+	spin_lock_irqsave(&ep->lock, flags);
+	recursion_detected = ep->in_handle_ep;
+
+	is_first_req = list_empty(&ep->queue);
+	ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
+			_req, is_first_req ? "yes" : "no",
+			_req->length, _req->buf);
+
+	if (!ep->enabled) {
+		_req->status = -ESHUTDOWN;
+		rc = -ESHUTDOWN;
+		goto out_locked;
+	}
+
+	if (req->in_use) {
+		ep_err(ep, "refusing to queue req %p (already queued)\n", req);
+		goto out_locked;
+	}
+
+	length = _req->length;
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	ep_add_request(ep, req);
+	spin_unlock_irqrestore(&ep->lock, flags);
+
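+	/*
+	 * ep0 requests are started at once, according to the current control
+	 * transfer state; other endpoints are kicked through handle_ep().
+	 */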
+	if (is_ep0(ep)) {
+		switch (dev->ep0state) {
+		case WAIT_ACK_SET_CONF_INTERF:
+			if (length == 0) {
+				ep_end_in_req(ep, req, NULL);
+			} else {
+				ep_err(ep, "got a request of %d bytes while "
+					"in state WAIT_ACK_SET_CONF_INTERF\n",
+					length);
+				ep_del_request(ep, req);
+				rc = -EL2HLT;
+			}
+			ep0_idle(ep->dev);
+			break;
+		case IN_DATA_STAGE:
+			if (!ep_is_full(ep))
+				if (write_ep0_fifo(ep, req))
+					ep0_end_in_req(ep, req, NULL);
+			break;
+		case OUT_DATA_STAGE:
+			if ((length == 0) || !epout_has_pkt(ep))
+				if (read_ep0_fifo(ep, req))
+					ep0_end_out_req(ep, req, NULL);
+			break;
+		default:
+			ep_err(ep, "odd state %s to send me a request\n",
+				EP0_STNAME(ep->dev));
+			ep_del_request(ep, req);
+			rc = -EL2HLT;
+			break;
+		}
+	} else {
+		if (!recursion_detected)
+			handle_ep(ep);
+	}
+
+out:
+	return rc;
+out_locked:
+	spin_unlock_irqrestore(&ep->lock, flags);
+	goto out;
+}
+
+/**
+ * pxa_ep_dequeue - Dequeue one request
+ * @_ep: usb endpoint
+ * @_req: usb request
+ *
+ * Return 0 if no error, -EINVAL or -ECONNRESET otherwise
+ */
+static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct pxa_ep		*ep;
+	struct udc_usb_ep	*udc_usb_ep;
+	struct pxa27x_request	*req;
+	unsigned long		flags;
+	int			rc = -EINVAL;
+
+	if (!_ep)
+		return rc;
+	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+	ep = udc_usb_ep->pxa_ep;
+	if (!ep || is_ep0(ep))
+		return rc;
+
+	spin_lock_irqsave(&ep->lock, flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req) {
+			rc = 0;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&ep->lock, flags);
+	if (!rc)
+		req_done(ep, req, -ECONNRESET, NULL);
+	return rc;
+}
+
+/**
+ * pxa_ep_set_halt - Halts operations on one endpoint
+ * @_ep: usb endpoint
+ * @value: 1 to set the halt, 0 to clear it
+ *
+ * Returns 0 if no error, -EINVAL, -EROFS, -EAGAIN otherwise
+ */
+static int pxa_ep_set_halt(struct usb_ep *_ep, int value)
+{
+	struct pxa_ep		*ep;
+	struct udc_usb_ep	*udc_usb_ep;
+	unsigned long flags;
+	int rc;
+
+
+	if (!_ep)
+		return -EINVAL;
+	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+	ep = udc_usb_ep->pxa_ep;
+	if (!ep || is_ep0(ep))
+		return -EINVAL;
+
+	if (value == 0) {
+		/*
+		 * This path (reset toggle+halt) is needed to implement
+		 * SET_INTERFACE on normal hardware.  but it can't be
+		 * done from software on the PXA UDC, and the hardware
+		 * forgets to do it as part of SET_INTERFACE automagic.
+		 */
+		ep_dbg(ep, "only host can clear halt\n");
+		return -EROFS;
+	}
+
+	spin_lock_irqsave(&ep->lock, flags);
+
+	rc = -EAGAIN;
+	if (ep->dir_in	&& (ep_is_full(ep) || !list_empty(&ep->queue)))
+		goto out;
+
+	/* FST, FEF bits are the same for control and non control endpoints */
+	rc = 0;
+	ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF);
+	if (is_ep0(ep))
+		set_ep0state(ep->dev, STALL);
+
+out:
+	spin_unlock_irqrestore(&ep->lock, flags);
+	return rc;
+}
+
+/**
+ * pxa_ep_fifo_status - Get the number of bytes in the physical endpoint
+ * @_ep: usb endpoint
+ *
+ * Returns number of bytes in OUT fifos. Broken for IN fifos.
+ */
+static int pxa_ep_fifo_status(struct usb_ep *_ep)
+{
+	struct pxa_ep		*ep;
+	struct udc_usb_ep	*udc_usb_ep;
+
+	if (!_ep)
+		return -ENODEV;
+	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+	ep = udc_usb_ep->pxa_ep;
+	if (!ep || is_ep0(ep))
+		return -ENODEV;
+
+	if (ep->dir_in)
+		return -EOPNOTSUPP;
+	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN || ep_is_empty(ep))
+		return 0;
+	else
+		return ep_count_bytes_remain(ep) + 1;
+}
+
+/**
+ * pxa_ep_fifo_flush - Flushes one endpoint
+ * @_ep: usb endpoint
+ *
+ * Discards all data in one endpoint (IN or OUT), except the control endpoint.
+ */
+static void pxa_ep_fifo_flush(struct usb_ep *_ep)
+{
+	struct pxa_ep		*ep;
+	struct udc_usb_ep	*udc_usb_ep;
+	unsigned long		flags;
+
+	if (!_ep)
+		return;
+	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+	ep = udc_usb_ep->pxa_ep;
+	if (!ep || is_ep0(ep))
+		return;
+
+	spin_lock_irqsave(&ep->lock, flags);
+
+	if (unlikely(!list_empty(&ep->queue)))
+		ep_dbg(ep, "called while queue list not empty\n");
+	ep_dbg(ep, "called\n");
+
+	/* for OUT, just read and discard the FIFO contents. */
+	if (!ep->dir_in) {
+		while (!ep_is_empty(ep))
+			udc_ep_readl(ep, UDCDR);
+	} else {
+		/* most IN status is the same, but ISO can't stall */
+		ep_write_UDCCSR(ep,
+				UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN
+				| (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST));
+	}
+
+	spin_unlock_irqrestore(&ep->lock, flags);
+}
+
+/**
+ * pxa_ep_enable - Enables usb endpoint
+ * @_ep: usb endpoint
+ * @desc: usb endpoint descriptor
+ *
+ * Nothing much to do here, as ep configuration is done once and for all
+ * before udc is enabled. After udc enable, no physical endpoint configuration
+ * can be changed.
+ * Function makes sanity checks and flushes the endpoint.
+ */
+static int pxa_ep_enable(struct usb_ep *_ep,
+	const struct usb_endpoint_descriptor *desc)
+{
+	struct pxa_ep		*ep;
+	struct udc_usb_ep	*udc_usb_ep;
+	struct pxa_udc		*udc;
+
+	if (!_ep || !desc)
+		return -EINVAL;
+
+	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+	if (udc_usb_ep->pxa_ep) {
+		ep = udc_usb_ep->pxa_ep;
+		ep_warn(ep, "usb_ep %s already enabled, doing nothing\n",
+			_ep->name);
+	} else {
+		ep = find_pxa_ep(udc_usb_ep->dev, udc_usb_ep);
+	}
+
+	if (!ep || is_ep0(ep)) {
+		dev_err(udc_usb_ep->dev->dev,
+			"unable to match pxa_ep for ep %s\n",
+			_ep->name);
+		return -EINVAL;
+	}
+
+	if ((desc->bDescriptorType != USB_DT_ENDPOINT)
+			|| (ep->type != usb_endpoint_type(desc))) {
+		ep_err(ep, "type mismatch\n");
+		return -EINVAL;
+	}
+
+	if (ep->fifo_size < usb_endpoint_maxp(desc)) {
+		ep_err(ep, "bad maxpacket\n");
+		return -ERANGE;
+	}
+
+	udc_usb_ep->pxa_ep = ep;
+	udc = ep->dev;
+
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
+		ep_err(ep, "bogus device state\n");
+		return -ESHUTDOWN;
+	}
+
+	ep->enabled = 1;
+
+	/* flush fifo (mostly for OUT buffers) */
+	pxa_ep_fifo_flush(_ep);
+
+	ep_dbg(ep, "enabled\n");
+	return 0;
+}
+
+/**
+ * pxa_ep_disable - Disable usb endpoint
+ * @_ep: usb endpoint
+ *
+ * Same as for pxa_ep_enable, no physical endpoint configuration can be
+ * changed.
+ * Function flushes the endpoint and related requests.
+ */
+static int pxa_ep_disable(struct usb_ep *_ep)
+{
+	struct pxa_ep		*ep;
+	struct udc_usb_ep	*udc_usb_ep;
+
+	if (!_ep)
+		return -EINVAL;
+
+	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+	ep = udc_usb_ep->pxa_ep;
+	if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
+		return -EINVAL;
+
+	ep->enabled = 0;
+	nuke(ep, -ESHUTDOWN);
+
+	pxa_ep_fifo_flush(_ep);
+	udc_usb_ep->pxa_ep = NULL;
+
+	ep_dbg(ep, "disabled\n");
+	return 0;
+}
+
+static struct usb_ep_ops pxa_ep_ops = {
+	.enable		= pxa_ep_enable,
+	.disable	= pxa_ep_disable,
+
+	.alloc_request	= pxa_ep_alloc_request,
+	.free_request	= pxa_ep_free_request,
+
+	.queue		= pxa_ep_queue,
+	.dequeue	= pxa_ep_dequeue,
+
+	.set_halt	= pxa_ep_set_halt,
+	.fifo_status	= pxa_ep_fifo_status,
+	.fifo_flush	= pxa_ep_fifo_flush,
+};
+
+/**
+ * dplus_pullup - Connect or disconnect pullup resistor to D+ pin
+ * @udc: udc device
+ * @on: 0 to disconnect the pullup resistor, 1 to connect it
+ * Context: any
+ *
+ * Handle D+ pullup resistor, make the device visible to the usb bus, and
+ * declare it as a full speed usb device
+ */
+static void dplus_pullup(struct pxa_udc *udc, int on)
+{
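+	/* the pullup is driven either by a gpio or a board-specific callback */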
+	if (on) {
+		if (gpio_is_valid(udc->mach->gpio_pullup))
+			gpio_set_value(udc->mach->gpio_pullup,
+				       !udc->mach->gpio_pullup_inverted);
+		if (udc->mach->udc_command)
+			udc->mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
+	} else {
+		if (gpio_is_valid(udc->mach->gpio_pullup))
+			gpio_set_value(udc->mach->gpio_pullup,
+				       udc->mach->gpio_pullup_inverted);
+		if (udc->mach->udc_command)
+			udc->mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
+	}
+	udc->pullup_on = on;
+}
+
+/**
+ * pxa_udc_get_frame - Returns usb frame number
+ * @_gadget: usb gadget
+ */
+static int pxa_udc_get_frame(struct usb_gadget *_gadget)
+{
+	struct pxa_udc *udc = to_gadget_udc(_gadget);
+
+	return (udc_readl(udc, UDCFNR) & 0x7ff);
+}
+
+/**
+ * pxa_udc_wakeup - Force udc device out of suspend
+ * @_gadget: usb gadget
+ *
+ * Returns 0 if successful, error code otherwise
+ */
+static int pxa_udc_wakeup(struct usb_gadget *_gadget)
+{
+	struct pxa_udc *udc = to_gadget_udc(_gadget);
+
+	/* host may not have enabled remote wakeup */
+	if ((udc_readl(udc, UDCCR) & UDCCR_DWRE) == 0)
+		return -EHOSTUNREACH;
+	udc_set_mask_UDCCR(udc, UDCCR_UDR);
+	return 0;
+}
+
+static void udc_enable(struct pxa_udc *udc);
+static void udc_disable(struct pxa_udc *udc);
+
+/**
+ * should_enable_udc - Tells if UDC should be enabled
+ * @udc: udc device
+ * Context: any
+ *
+ * The UDC should be enabled if :
+ *  - the pullup resistor is connected
+ *  - and a gadget driver is bound
+ *  - and vbus is sensed (or no vbus sense is available)
+ *
+ * Returns 1 if UDC should be enabled, 0 otherwise
+ */
+static int should_enable_udc(struct pxa_udc *udc)
+{
+	int put_on;
+
+	put_on = ((udc->pullup_on) && (udc->driver));
+	put_on &= ((udc->vbus_sensed) || (!udc->transceiver));
+	return put_on;
+}
+
+/**
+ * should_disable_udc - Tells if UDC should be disabled
+ * @udc: udc device
+ * Context: any
+ *
+ * The UDC should be disabled if :
+ *  - the pullup resistor is not connected
+ *  - or no gadget driver is bound
+ *  - or no vbus is sensed (when vbus sensing is available)
+ *
+ * Returns 1 if UDC should be disabled
+ */
+static int should_disable_udc(struct pxa_udc *udc)
+{
+	int put_off;
+
+	put_off = ((!udc->pullup_on) || (!udc->driver));
+	put_off |= ((!udc->vbus_sensed) && (udc->transceiver));
+	return put_off;
+}
+
+/**
+ * pxa_udc_pullup - Offer manual D+ pullup control
+ * @_gadget: usb gadget using the control
+ * @is_active: 0 if disconnect, else connect D+ pullup resistor
+ * Context: !in_interrupt()
+ *
+ * Returns 0 if OK, -EOPNOTSUPP if udc driver doesn't handle D+ pullup
+ */
+static int pxa_udc_pullup(struct usb_gadget *_gadget, int is_active)
+{
+	struct pxa_udc *udc = to_gadget_udc(_gadget);
+
+	if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
+		return -EOPNOTSUPP;
+
+	dplus_pullup(udc, is_active);
+
+	if (should_enable_udc(udc))
+		udc_enable(udc);
+	if (should_disable_udc(udc))
+		udc_disable(udc);
+	return 0;
+}
+
+/**
+ * pxa_udc_vbus_session - Called by external transceiver to enable/disable udc
+ * @_gadget: usb gadget
+ * @is_active: 0 to disable the udc, 1 to enable it
+ *
+ * Enables the udc, and optionally activates the D+ pullup resistor. Or disables the
+ * udc, and deactivates D+ pullup resistor.
+ *
+ * Returns 0
+ */
+static int pxa_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+	struct pxa_udc *udc = to_gadget_udc(_gadget);
+
+	udc->vbus_sensed = is_active;
+	if (should_enable_udc(udc))
+		udc_enable(udc);
+	if (should_disable_udc(udc))
+		udc_disable(udc);
+
+	return 0;
+}
+
+/**
+ * pxa_udc_vbus_draw - Called by gadget driver after SET_CONFIGURATION completed
+ * @_gadget: usb gadget
+ * @mA: current drawn
+ *
+ * Context: !in_interrupt()
+ *
+ * Called after a configuration was chosen by a USB host, to inform how much
+ * current can be drawn by the device from the VBus line.
+ *
+ * Returns 0 or -EOPNOTSUPP if no transceiver is handling the udc
+ */
+static int pxa_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+	struct pxa_udc *udc;
+
+	udc = to_gadget_udc(_gadget);
+	if (udc->transceiver)
+		return usb_phy_set_power(udc->transceiver, mA);
+	return -EOPNOTSUPP;
+}
+
+static int pxa27x_udc_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *));
+static int pxa27x_udc_stop(struct usb_gadget_driver *driver);
+
+static const struct usb_gadget_ops pxa_udc_ops = {
+	.get_frame	= pxa_udc_get_frame,
+	.wakeup		= pxa_udc_wakeup,
+	.pullup		= pxa_udc_pullup,
+	.vbus_session	= pxa_udc_vbus_session,
+	.vbus_draw	= pxa_udc_vbus_draw,
+	.start		= pxa27x_udc_start,
+	.stop		= pxa27x_udc_stop,
+};
+
+/**
+ * udc_disable - disable udc device controller
+ * @udc: udc device
+ * Context: any
+ *
+ * Disables the udc device : disables clocks, udc interrupts, control endpoint
+ * interrupts.
+ */
+static void udc_disable(struct pxa_udc *udc)
+{
+	if (!udc->enabled)
+		return;
+
+	udc_writel(udc, UDCICR0, 0);
+	udc_writel(udc, UDCICR1, 0);
+
+	udc_clear_mask_UDCCR(udc, UDCCR_UDE);
+	clk_disable(udc->clk);
+
+	ep0_idle(udc);
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+
+	udc->enabled = 0;
+}
+
+/**
+ * udc_init_data - Initialize udc device data structures
+ * @dev: udc device
+ *
+ * Initializes the gadget endpoint list and endpoint locks. No action is taken
+ * on the hardware.
+ */
+static __init void udc_init_data(struct pxa_udc *dev)
+{
+	int i;
+	struct pxa_ep *ep;
+
+	/* device/ep0 records init */
+	INIT_LIST_HEAD(&dev->gadget.ep_list);
+	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+	dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0];
+	ep0_idle(dev);
+
+	/* PXA endpoints init */
+	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
+		ep = &dev->pxa_ep[i];
+
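+		/* only ep0 starts enabled; others wait for pxa_ep_enable() */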
+		ep->enabled = is_ep0(ep);
+		INIT_LIST_HEAD(&ep->queue);
+		spin_lock_init(&ep->lock);
+	}
+
+	/* USB endpoints init */
+	for (i = 1; i < NR_USB_ENDPOINTS; i++)
+		list_add_tail(&dev->udc_usb_ep[i].usb_ep.ep_list,
+				&dev->gadget.ep_list);
+}
+
+/**
+ * udc_enable - Enables the udc device
+ * @udc: udc device
+ *
+ * Enables the udc device : enables clocks, udc interrupts, control endpoint
+ * interrupts, sets usb as UDC client and sets up endpoints.
+ */
+static void udc_enable(struct pxa_udc *udc)
+{
+	if (udc->enabled)
+		return;
+
+	udc_writel(udc, UDCICR0, 0);
+	udc_writel(udc, UDCICR1, 0);
+	udc_clear_mask_UDCCR(udc, UDCCR_UDE);
+
+	clk_enable(udc->clk);
+
+	ep0_idle(udc);
+	udc->gadget.speed = USB_SPEED_FULL;
+	memset(&udc->stats, 0, sizeof(udc->stats));
+
+	udc_set_mask_UDCCR(udc, UDCCR_UDE);
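+	/*
+	 * Ack Control Mode: the SET_CONFIGURATION/SET_INTERFACE status stage
+	 * is acked by software through UDCCSR0_AREN (see
+	 * pxa27x_change_configuration()).
+	 */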
+	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM);
+	udelay(2);
+	if (udc_readl(udc, UDCCR) & UDCCR_EMCE)
+		dev_err(udc->dev, "Configuration errors, udc disabled\n");
+
+	/*
+	 * Caller must be able to sleep in order to cope with startup transients
+	 */
+	msleep(100);
+
+	/* enable suspend/resume and reset irqs */
+	udc_writel(udc, UDCICR1,
+			UDCICR1_IECC | UDCICR1_IERU
+			| UDCICR1_IESU | UDCICR1_IERS);
+
+	/* enable ep0 irqs */
+	pio_irq_enable(&udc->pxa_ep[0]);
+
+	udc->enabled = 1;
+}
+
+/**
+ * pxa27x_udc_start - Register gadget driver
+ * @driver: gadget driver
+ * @bind: bind function
+ *
+ * When a driver is successfully registered, it will receive control requests
+ * including set_configuration(), which enables non-control requests.  Then
+ * usb traffic follows until a disconnect is reported.  Then a host may connect
+ * again, or the driver might get unbound.
+ *
+ * Note that the udc is not automatically enabled. Check function
+ * should_enable_udc().
+ *
+ * Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise
+ */
+static int pxa27x_udc_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct pxa_udc *udc = the_controller;
+	int retval;
+
+	if (!driver || driver->max_speed < USB_SPEED_FULL || !bind
+			|| !driver->disconnect || !driver->setup)
+		return -EINVAL;
+	if (!udc)
+		return -ENODEV;
+	if (udc->driver)
+		return -EBUSY;
+
+	/* first hook up the driver ... */
+	udc->driver = driver;
+	udc->gadget.dev.driver = &driver->driver;
+	dplus_pullup(udc, 1);
+
+	retval = device_add(&udc->gadget.dev);
+	if (retval) {
+		dev_err(udc->dev, "device_add error %d\n", retval);
+		goto add_fail;
+	}
+	retval = bind(&udc->gadget);
+	if (retval) {
+		dev_err(udc->dev, "bind to driver %s --> error %d\n",
+			driver->driver.name, retval);
+		goto bind_fail;
+	}
+	dev_dbg(udc->dev, "registered gadget driver '%s'\n",
+		driver->driver.name);
+
+	if (udc->transceiver) {
+		retval = otg_set_peripheral(udc->transceiver->otg,
+						&udc->gadget);
+		if (retval) {
+			dev_err(udc->dev, "can't bind to transceiver\n");
+			goto transceiver_fail;
+		}
+	}
+
+	if (should_enable_udc(udc))
+		udc_enable(udc);
+	return 0;
+
+transceiver_fail:
+	if (driver->unbind)
+		driver->unbind(&udc->gadget);
+bind_fail:
+	device_del(&udc->gadget.dev);
+add_fail:
+	udc->driver = NULL;
+	udc->gadget.dev.driver = NULL;
+	return retval;
+}
+
+/**
+ * stop_activity - Stops udc endpoints
+ * @udc: udc device
+ * @driver: gadget driver
+ *
+ * Disables all udc endpoints (even the control endpoint), and reports the
+ * disconnect to the gadget user.
+ */
+static void stop_activity(struct pxa_udc *udc, struct usb_gadget_driver *driver)
+{
+	int i;
+
+	/* don't disconnect drivers more than once */
+	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
+		driver = NULL;
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+
+	for (i = 0; i < NR_USB_ENDPOINTS; i++)
+		pxa_ep_disable(&udc->udc_usb_ep[i].usb_ep);
+
+	if (driver)
+		driver->disconnect(&udc->gadget);
+}
+
+/**
+ * pxa27x_udc_stop - Unregister the gadget driver
+ * @driver: gadget driver
+ *
+ * Returns 0 if no error, -ENODEV, -EINVAL otherwise
+ */
+static int pxa27x_udc_stop(struct usb_gadget_driver *driver)
+{
+	struct pxa_udc *udc = the_controller;
+
+	if (!udc)
+		return -ENODEV;
+	if (!driver || driver != udc->driver || !driver->unbind)
+		return -EINVAL;
+
+	stop_activity(udc, driver);
+	udc_disable(udc);
+	dplus_pullup(udc, 0);
+
+	driver->unbind(&udc->gadget);
+	udc->driver = NULL;
+
+	device_del(&udc->gadget.dev);
+	dev_info(udc->dev, "unregistered gadget driver '%s'\n",
+		 driver->driver.name);
+
+	if (udc->transceiver)
+		return otg_set_peripheral(udc->transceiver->otg, NULL);
+	return 0;
+}
+
+/**
+ * handle_ep0_ctrl_req - handle control endpoint control request
+ * @udc: udc device
+ * @req: control request
+ */
+static void handle_ep0_ctrl_req(struct pxa_udc *udc,
+				struct pxa27x_request *req)
+{
+	struct pxa_ep *ep = &udc->pxa_ep[0];
+	union {
+		struct usb_ctrlrequest	r;
+		u32			word[2];
+	} u;
+	int i;
+	int have_extrabytes = 0;
+	unsigned long flags;
+
+	nuke(ep, -EPROTO);
+	spin_lock_irqsave(&ep->lock, flags);
+
+	/*
+	 * In the PXA320 manual, in the section about Back-to-Back setup
+	 * packets, it describes this situation.  The solution is to set OPC to
+	 * get rid of the status packet, and then continue with the setup
+	 * packet. Generalize to pxa27x CPUs.
+	 */
+	if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0))
+		ep_write_UDCCSR(ep, UDCCSR0_OPC);
+
+	/* read SETUP packet */
+	for (i = 0; i < 2; i++) {
+		if (unlikely(ep_is_empty(ep)))
+			goto stall;
+		u.word[i] = udc_ep_readl(ep, UDCDR);
+	}
+
+	have_extrabytes = !ep_is_empty(ep);
+	while (!ep_is_empty(ep)) {
+		i = udc_ep_readl(ep, UDCDR);
+		ep_err(ep, "wrong to have extra bytes for setup : 0x%08x\n", i);
+	}
+
+	ep_dbg(ep, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+		u.r.bRequestType, u.r.bRequest,
+		le16_to_cpu(u.r.wValue), le16_to_cpu(u.r.wIndex),
+		le16_to_cpu(u.r.wLength));
+	if (unlikely(have_extrabytes))
+		goto stall;
+
+	if (u.r.bRequestType & USB_DIR_IN)
+		set_ep0state(udc, IN_DATA_STAGE);
+	else
+		set_ep0state(udc, OUT_DATA_STAGE);
+
+	/* Tell UDC to enter Data Stage */
+	ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
+
+	spin_unlock_irqrestore(&ep->lock, flags);
+	i = udc->driver->setup(&udc->gadget, &u.r);
+	spin_lock_irqsave(&ep->lock, flags);
+	if (i < 0)
+		goto stall;
+out:
+	spin_unlock_irqrestore(&ep->lock, flags);
+	return;
+stall:
+	ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
+		udc_ep_readl(ep, UDCCSR), i);
+	ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF);
+	set_ep0state(udc, STALL);
+	goto out;
+}
+
+/**
+ * handle_ep0 - Handle control endpoint data transfers
+ * @udc: udc device
+ * @fifo_irq: 1 if triggered by fifo service type irq
+ * @opc_irq: 1 if triggered by output packet complete type irq
+ *
+ * Context : when in_interrupt() or with ep->lock held
+ *
+ * Tries to transfer all pending request data into the endpoint and/or
+ * transfer all pending data in the endpoint into usb requests.
+ * Handles the states of the ep0 automaton.
+ *
+ * PXA27x hardware handles several standard usb control requests without
+ * driver notification.  The requests fully handled by hardware are :
+ *  SET_ADDRESS, SET_FEATURE, CLEAR_FEATURE, GET_CONFIGURATION, GET_INTERFACE,
+ *  GET_STATUS
+ * The requests handled by hardware, but with irq notification are :
+ *  SYNCH_FRAME, SET_CONFIGURATION, SET_INTERFACE
+ * The remaining standard requests really handled by handle_ep0 are :
+ *  GET_DESCRIPTOR, SET_DESCRIPTOR, specific requests.
+ * Requests standardized outside of USB 2.0 chapter 9 are handled more
+ * uniformly, by gadget drivers.
+ *
+ * The control endpoint state machine is _not_ USB spec compliant, it's even
+ * hardly compliant with the Intel PXA270 developers guide.
+ * The key points behind this state machine are :
+ *   - on every setup token, bit UDCCSR0_SA is raised and held until cleared by
+ *     software.
+ *   - on every OUT packet received, UDCCSR0_OPC is raised and held until
+ *     cleared by software.
+ *   - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
+ *     before reading ep0.
+ *     This is true only for PXA27x. This is not true anymore for PXA3xx family
+ *     (check Back-to-Back setup packet in developers guide).
+ *   - irq can be called on a "packet complete" event (opc_irq=1), while
+ *     UDCCSR0_OPC is not yet raised (delta can be as big as 100ms
+ *     from experimentation).
+ *   - as UDCCSR0_SA can be activated while in irq handling, and clearing
+ *     UDCCSR0_OPC would flush the setup data, we almost never clear UDCCSR0_OPC
+ *     => we never actually read the "status stage" packet of an IN data stage
+ *     => this is not documented in Intel documentation
+ *   - hardware has no idea of a STATUS STAGE, it only handles the SETUP STAGE
+ *     and DATA STAGE. The driver adds a STATUS STAGE to send the last zero
+ *     length packet in OUT_STATUS_STAGE.
+ *   - special attention was needed for IN_STATUS_STAGE. If a packet complete
+ *     event is detected, we terminate the status stage without acknowledging
+ *     the packet (so as not to risk losing a potential SETUP packet)
+ */
+static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
+{
+	u32			udccsr0;
+	struct pxa_ep		*ep = &udc->pxa_ep[0];
+	struct pxa27x_request	*req = NULL;
+	int			completed = 0;
+
+	if (!list_empty(&ep->queue))
+		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
+
+	udccsr0 = udc_ep_readl(ep, UDCCSR);
+	ep_dbg(ep, "state=%s, req=%p, udccsr0=0x%03x, udcbcr=%d, irq_msk=%x\n",
+		EP0_STNAME(udc), req, udccsr0, udc_ep_readl(ep, UDCBCR),
+		(fifo_irq << 1 | opc_irq));
+
+	if (udccsr0 & UDCCSR0_SST) {
+		ep_dbg(ep, "clearing stall status\n");
+		nuke(ep, -EPIPE);
+		ep_write_UDCCSR(ep, UDCCSR0_SST);
+		ep0_idle(udc);
+	}
+
+	if (udccsr0 & UDCCSR0_SA) {
+		nuke(ep, 0);
+		set_ep0state(udc, SETUP_STAGE);
+	}
+
+	switch (udc->ep0state) {
+	case WAIT_FOR_SETUP:
+		/*
+		 * Hardware bug : beware, we cannot clear OPC, since we would
+		 * miss a potential OPC irq for a setup packet.
+		 * So, we only do ... nothing, and hope for a next irq with
+		 * UDCCSR0_SA set.
+		 */
+		break;
+	case SETUP_STAGE:
+		udccsr0 &= UDCCSR0_CTRL_REQ_MASK;
+		if (likely(udccsr0 == UDCCSR0_CTRL_REQ_MASK))
+			handle_ep0_ctrl_req(udc, req);
+		break;
+	case IN_DATA_STAGE:			/* GET_DESCRIPTOR */
+		if (epout_has_pkt(ep))
+			ep_write_UDCCSR(ep, UDCCSR0_OPC);
+		if (req && !ep_is_full(ep))
+			completed = write_ep0_fifo(ep, req);
+		if (completed)
+			ep0_end_in_req(ep, req, NULL);
+		break;
+	case OUT_DATA_STAGE:			/* SET_DESCRIPTOR */
+		if (epout_has_pkt(ep) && req)
+			completed = read_ep0_fifo(ep, req);
+		if (completed)
+			ep0_end_out_req(ep, req, NULL);
+		break;
+	case STALL:
+		ep_write_UDCCSR(ep, UDCCSR0_FST);
+		break;
+	case IN_STATUS_STAGE:
+		/*
+		 * Hardware bug : beware, we cannot clear OPC, since we would
+		 * miss a potential PC irq for a setup packet.
+		 * So, we only put the ep0 into WAIT_FOR_SETUP state.
+		 */
+		if (opc_irq)
+			ep0_idle(udc);
+		break;
+	case OUT_STATUS_STAGE:
+	case WAIT_ACK_SET_CONF_INTERF:
+		ep_warn(ep, "should never get in %s state here!!!\n",
+				EP0_STNAME(ep->dev));
+		ep0_idle(udc);
+		break;
+	}
+}
+
+/**
+ * handle_ep - Handle endpoint data transfers
+ * @ep: pxa physical endpoint
+ *
+ * Tries to transfer all pending request data into the endpoint and/or
+ * transfer all pending data in the endpoint into usb requests.
+ *
+ * Is always called when in_interrupt() and with ep->lock released.
+ */
+static void handle_ep(struct pxa_ep *ep)
+{
+	struct pxa27x_request	*req;
+	int completed;
+	u32 udccsr;
+	int is_in = ep->dir_in;
+	int loop = 0;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&ep->lock, flags);
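+	/*
+	 * in_handle_ep guards against re-entry: a completion callback may
+	 * call pxa_ep_queue(), which would otherwise call handle_ep() again
+	 * on this endpoint.
+	 */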
+	if (ep->in_handle_ep)
+		goto recursion_detected;
+	ep->in_handle_ep = 1;
+
+	do {
+		completed = 0;
+		udccsr = udc_ep_readl(ep, UDCCSR);
+
+		if (likely(!list_empty(&ep->queue)))
+			req = list_entry(ep->queue.next,
+					struct pxa27x_request, queue);
+		else
+			req = NULL;
+
+		ep_dbg(ep, "req:%p, udccsr 0x%03x loop=%d\n",
+				req, udccsr, loop++);
+
+		if (unlikely(udccsr & (UDCCSR_SST | UDCCSR_TRN)))
+			udc_ep_writel(ep, UDCCSR,
+					udccsr & (UDCCSR_SST | UDCCSR_TRN));
+		if (!req)
+			break;
+
+		if (unlikely(is_in)) {
+			if (likely(!ep_is_full(ep)))
+				completed = write_fifo(ep, req);
+		} else {
+			if (likely(epout_has_pkt(ep)))
+				completed = read_fifo(ep, req);
+		}
+
+		if (completed) {
+			if (is_in)
+				ep_end_in_req(ep, req, &flags);
+			else
+				ep_end_out_req(ep, req, &flags);
+		}
+	} while (completed);
+
+	ep->in_handle_ep = 0;
+recursion_detected:
+	spin_unlock_irqrestore(&ep->lock, flags);
+}
+
+/**
+ * pxa27x_change_configuration - Handle SET_CONF usb request notification
+ * @udc: udc device
+ * @config: usb configuration
+ *
+ * Posts the request to the upper level.
+ * Doesn't use any pxa specific hardware configuration capabilities.
+ */
+static void pxa27x_change_configuration(struct pxa_udc *udc, int config)
+{
+	struct usb_ctrlrequest req;
+
+	dev_dbg(udc->dev, "config=%d\n", config);
+
+	udc->config = config;
+	udc->last_interface = 0;
+	udc->last_alternate = 0;
+
+	req.bRequestType = 0;
+	req.bRequest = USB_REQ_SET_CONFIGURATION;
+	req.wValue = config;
+	req.wIndex = 0;
+	req.wLength = 0;
+
+	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
+	udc->driver->setup(&udc->gadget, &req);
+	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
+}
+
+/**
+ * pxa27x_change_interface - Handle SET_INTERF usb request notification
+ * @udc: udc device
+ * @iface: interface number
+ * @alt: alternate setting number
+ *
+ * Posts the request to the upper level.
+ * Doesn't use any pxa specific hardware configuration capabilities.
+ */
+static void pxa27x_change_interface(struct pxa_udc *udc, int iface, int alt)
+{
+	struct usb_ctrlrequest  req;
+
+	dev_dbg(udc->dev, "interface=%d, alternate setting=%d\n", iface, alt);
+
+	udc->last_interface = iface;
+	udc->last_alternate = alt;
+
+	req.bRequestType = USB_RECIP_INTERFACE;
+	req.bRequest = USB_REQ_SET_INTERFACE;
+	req.wValue = alt;
+	req.wIndex = iface;
+	req.wLength = 0;
+
+	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
+	udc->driver->setup(&udc->gadget, &req);
+	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
+}
+
+/*
+ * irq_handle_data - Handle data transfer
+ * @irq: IRQ number
+ * @udc: pxa_udc device structure
+ *
+ * Called from the irq handler, transfers data between the endpoint fifos and
+ * the queued requests.
+ */
+static void irq_handle_data(int irq, struct pxa_udc *udc)
+{
+	int i;
+	struct pxa_ep *ep;
+	u32 udcisr0 = udc_readl(udc, UDCISR0) & UDCCISR0_EP_MASK;
+	u32 udcisr1 = udc_readl(udc, UDCISR1) & UDCCISR1_EP_MASK;
+
+	if (udcisr0 & UDCISR_INT_MASK) {
+		udc->pxa_ep[0].stats.irqs++;
+		udc_writel(udc, UDCISR0, UDCISR_INT(0, UDCISR_INT_MASK));
+		handle_ep0(udc, !!(udcisr0 & UDCICR_FIFOERR),
+				!!(udcisr0 & UDCICR_PKTCOMPL));
+	}
+
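+	/* each endpoint owns 2 status bits: packet complete and fifo error */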
+	udcisr0 >>= 2;
+	for (i = 1; udcisr0 != 0 && i < 16; udcisr0 >>= 2, i++) {
+		if (!(udcisr0 & UDCISR_INT_MASK))
+			continue;
+
+		udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));
+
+		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
+		if (i < ARRAY_SIZE(udc->pxa_ep)) {
+			ep = &udc->pxa_ep[i];
+			ep->stats.irqs++;
+			handle_ep(ep);
+		}
+	}
+
+	for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
+		udc_writel(udc, UDCISR1, UDCISR_INT(i - 16, UDCISR_INT_MASK));
+		if (!(udcisr1 & UDCISR_INT_MASK))
+			continue;
+
+		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
+		if (i < ARRAY_SIZE(udc->pxa_ep)) {
+			ep = &udc->pxa_ep[i];
+			ep->stats.irqs++;
+			handle_ep(ep);
+		}
+	}
+
+}
+
+/**
+ * irq_udc_suspend - Handle IRQ "UDC Suspend"
+ * @udc: udc device
+ */
+static void irq_udc_suspend(struct pxa_udc *udc)
+{
+	udc_writel(udc, UDCISR1, UDCISR1_IRSU);
+	udc->stats.irqs_suspend++;
+
+	if (udc->gadget.speed != USB_SPEED_UNKNOWN
+			&& udc->driver && udc->driver->suspend)
+		udc->driver->suspend(&udc->gadget);
+	ep0_idle(udc);
+}
+
+/**
+ * irq_udc_resume - Handle IRQ "UDC Resume"
+ * @udc: udc device
+ */
+static void irq_udc_resume(struct pxa_udc *udc)
+{
+	udc_writel(udc, UDCISR1, UDCISR1_IRRU);
+	udc->stats.irqs_resume++;
+
+	if (udc->gadget.speed != USB_SPEED_UNKNOWN
+			&& udc->driver && udc->driver->resume)
+		udc->driver->resume(&udc->gadget);
+}
+
+/**
+ * irq_udc_reconfig - Handle IRQ "UDC Change Configuration"
+ * @udc: udc device
+ */
+static void irq_udc_reconfig(struct pxa_udc *udc)
+{
+	unsigned config, interface, alternate, config_change;
+	u32 udccr = udc_readl(udc, UDCCR);
+
+	udc_writel(udc, UDCISR1, UDCISR1_IRCC);
+	udc->stats.irqs_reconfig++;
+
+	config = (udccr & UDCCR_ACN) >> UDCCR_ACN_S;
+	config_change = (config != udc->config);
+	pxa27x_change_configuration(udc, config);
+
+	interface = (udccr & UDCCR_AIN) >> UDCCR_AIN_S;
+	alternate = (udccr & UDCCR_AAISN) >> UDCCR_AAISN_S;
+	pxa27x_change_interface(udc, interface, alternate);
+
+	if (config_change)
+		update_pxa_ep_matches(udc);
+	udc_set_mask_UDCCR(udc, UDCCR_SMAC);
+}
+
+/**
+ * irq_udc_reset - Handle IRQ "UDC Reset"
+ * @udc: udc device
+ */
+static void irq_udc_reset(struct pxa_udc *udc)
+{
+	u32 udccr = udc_readl(udc, UDCCR);
+	struct pxa_ep *ep = &udc->pxa_ep[0];
+
+	dev_info(udc->dev, "USB reset\n");
+	udc_writel(udc, UDCISR1, UDCISR1_IRRS);
+	udc->stats.irqs_reset++;
+
+	if ((udccr & UDCCR_UDA) == 0) {
+		dev_dbg(udc->dev, "USB reset start\n");
+		stop_activity(udc, udc->driver);
+	}
+	udc->gadget.speed = USB_SPEED_FULL;
+	memset(&udc->stats, 0, sizeof udc->stats);
+
+	nuke(ep, -EPROTO);
+	ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC);
+	ep0_idle(udc);
+}
+
+/**
+ * pxa_udc_irq - Main irq handler
+ * @irq: irq number
+ * @_dev: udc device
+ *
+ * Handles all udc interrupts
+ */
+static irqreturn_t pxa_udc_irq(int irq, void *_dev)
+{
+	struct pxa_udc *udc = _dev;
+	u32 udcisr0 = udc_readl(udc, UDCISR0);
+	u32 udcisr1 = udc_readl(udc, UDCISR1);
+	u32 udccr = udc_readl(udc, UDCCR);
+	u32 udcisr1_spec;
+
+	dev_vdbg(udc->dev, "Interrupt, UDCISR0:0x%08x, UDCISR1:0x%08x, "
+		 "UDCCR:0x%08x\n", udcisr0, udcisr1, udccr);
+
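+	/* bits 31..27 of UDCISR1 report the device-level events */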
+	udcisr1_spec = udcisr1 & 0xf8000000;
+	if (unlikely(udcisr1_spec & UDCISR1_IRSU))
+		irq_udc_suspend(udc);
+	if (unlikely(udcisr1_spec & UDCISR1_IRRU))
+		irq_udc_resume(udc);
+	if (unlikely(udcisr1_spec & UDCISR1_IRCC))
+		irq_udc_reconfig(udc);
+	if (unlikely(udcisr1_spec & UDCISR1_IRRS))
+		irq_udc_reset(udc);
+
+	if ((udcisr0 & UDCCISR0_EP_MASK) | (udcisr1 & UDCCISR1_EP_MASK))
+		irq_handle_data(irq, udc);
+
+	return IRQ_HANDLED;
+}
+
+static struct pxa_udc memory = {
+	.gadget = {
+		.ops		= &pxa_udc_ops,
+		.ep0		= &memory.udc_usb_ep[0].usb_ep,
+		.name		= driver_name,
+		.dev = {
+			.init_name	= "gadget",
+		},
+	},
+
+	.udc_usb_ep = {
+		USB_EP_CTRL,
+		USB_EP_OUT_BULK(1),
+		USB_EP_IN_BULK(2),
+		USB_EP_IN_ISO(3),
+		USB_EP_OUT_ISO(4),
+		USB_EP_IN_INT(5),
+	},
+
+	.pxa_ep = {
+		PXA_EP_CTRL,
+		/* Endpoints for gadget zero */
+		PXA_EP_OUT_BULK(1, 1, 3, 0, 0),
+		PXA_EP_IN_BULK(2,  2, 3, 0, 0),
+		/* Endpoints for ether gadget, file storage gadget */
+		PXA_EP_OUT_BULK(3, 1, 1, 0, 0),
+		PXA_EP_IN_BULK(4,  2, 1, 0, 0),
+		PXA_EP_IN_ISO(5,   3, 1, 0, 0),
+		PXA_EP_OUT_ISO(6,  4, 1, 0, 0),
+		PXA_EP_IN_INT(7,   5, 1, 0, 0),
+		/* Endpoints for RNDIS, serial */
+		PXA_EP_OUT_BULK(8, 1, 2, 0, 0),
+		PXA_EP_IN_BULK(9,  2, 2, 0, 0),
+		PXA_EP_IN_INT(10,  5, 2, 0, 0),
+		/*
+		 * All the following endpoints are only for completeness.  They
+		 * will never work, as multiple interfaces are really broken on
+		 * the pxa.
+		 */
+		PXA_EP_OUT_BULK(11, 1, 2, 1, 0),
+		PXA_EP_IN_BULK(12,  2, 2, 1, 0),
+		/* Endpoint for CDC Ether */
+		PXA_EP_OUT_BULK(13, 1, 1, 1, 1),
+		PXA_EP_IN_BULK(14,  2, 1, 1, 1),
+	}
+};
+
+/**
+ * pxa_udc_probe - probes the udc device
+ * @_dev: platform device
+ *
+ * Perform basic init : allocates udc clock, creates sysfs files, requests
+ * irq.
+ */
+static int __init pxa_udc_probe(struct platform_device *pdev)
+{
+	struct resource *regs;
+	struct pxa_udc *udc = &memory;
+	int retval = 0, gpio;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs)
+		return -ENXIO;
+	udc->irq = platform_get_irq(pdev, 0);
+	if (udc->irq < 0)
+		return udc->irq;
+
+	udc->dev = &pdev->dev;
+	udc->mach = pdev->dev.platform_data;
+	udc->transceiver = usb_get_transceiver();
+
+	gpio = udc->mach->gpio_pullup;
+	if (gpio_is_valid(gpio)) {
+		retval = gpio_request(gpio, "USB D+ pullup");
+		if (retval == 0)
+			gpio_direction_output(gpio,
+				       udc->mach->gpio_pullup_inverted);
+	}
+	if (retval) {
+		dev_err(&pdev->dev, "Couldn't request gpio %d : %d\n",
+			gpio, retval);
+		return retval;
+	}
+
+	udc->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(udc->clk)) {
+		retval = PTR_ERR(udc->clk);
+		goto err_clk;
+	}
+
+	retval = -ENOMEM;
+	udc->regs = ioremap(regs->start, resource_size(regs));
+	if (!udc->regs) {
+		dev_err(&pdev->dev, "Unable to map UDC I/O memory\n");
+		goto err_map;
+	}
+
+	device_initialize(&udc->gadget.dev);
+	udc->gadget.dev.parent = &pdev->dev;
+	udc->gadget.dev.dma_mask = NULL;
+	udc->vbus_sensed = 0;
+
+	the_controller = udc;
+	platform_set_drvdata(pdev, udc);
+	udc_init_data(udc);
+	pxa_eps_setup(udc);
+
+	/* irq setup after old hardware state is cleaned up */
+	retval = request_irq(udc->irq, pxa_udc_irq,
+			IRQF_SHARED, driver_name, udc);
+	if (retval != 0) {
+		dev_err(udc->dev, "%s: can't get irq %i, err %d\n",
+			driver_name, IRQ_USB, retval);
+		goto err_irq;
+	}
+	retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+	if (retval)
+		goto err_add_udc;
+
+	pxa_init_debugfs(udc);
+	return 0;
+err_add_udc:
+	free_irq(udc->irq, udc);
+err_irq:
+	iounmap(udc->regs);
+err_map:
+	clk_put(udc->clk);
+	udc->clk = NULL;
+err_clk:
+	return retval;
+}
+
+/**
+ * pxa_udc_remove - removes the udc device driver
+ * @_dev: platform device
+ */
+static int __exit pxa_udc_remove(struct platform_device *_dev)
+{
+	struct pxa_udc *udc = platform_get_drvdata(_dev);
+	int gpio = udc->mach->gpio_pullup;
+
+	usb_del_gadget_udc(&udc->gadget);
+	usb_gadget_unregister_driver(udc->driver);
+	free_irq(udc->irq, udc);
+	pxa_cleanup_debugfs(udc);
+	if (gpio_is_valid(gpio))
+		gpio_free(gpio);
+
+	usb_put_transceiver(udc->transceiver);
+
+	udc->transceiver = NULL;
+	platform_set_drvdata(_dev, NULL);
+	the_controller = NULL;
+	clk_put(udc->clk);
+	iounmap(udc->regs);
+
+	return 0;
+}
+
+static void pxa_udc_shutdown(struct platform_device *_dev)
+{
+	struct pxa_udc *udc = platform_get_drvdata(_dev);
+
+	if (udc_readl(udc, UDCCR) & UDCCR_UDE)
+		udc_disable(udc);
+}
+
+#ifdef CONFIG_PXA27x
+extern void pxa27x_clear_otgph(void);
+#else
+#define pxa27x_clear_otgph()   do {} while (0)
+#endif
+
+#ifdef CONFIG_PM
+/**
+ * pxa_udc_suspend - Suspend udc device
+ * @_dev: platform device
+ * @state: suspend state
+ *
+ * Suspends udc : saves configuration registers (UDCCR*), then disables the udc
+ * device.
+ */
+static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
+{
+	int i;
+	struct pxa_udc *udc = platform_get_drvdata(_dev);
+	struct pxa_ep *ep;
+
+	ep = &udc->pxa_ep[0];
+	udc->udccsr0 = udc_ep_readl(ep, UDCCSR);
+	for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
+		ep = &udc->pxa_ep[i];
+		ep->udccsr_value = udc_ep_readl(ep, UDCCSR);
+		ep->udccr_value  = udc_ep_readl(ep, UDCCR);
+		ep_dbg(ep, "udccsr:0x%03x, udccr:0x%x\n",
+				ep->udccsr_value, ep->udccr_value);
+	}
+
+	udc_disable(udc);
+	udc->pullup_resume = udc->pullup_on;
+	dplus_pullup(udc, 0);
+
+	return 0;
+}
+
+/**
+ * pxa_udc_resume - Resume udc device
+ * @_dev: platform device
+ *
+ * Resumes udc : restores configuration registers (UDCCR*), then enables the udc
+ * device.
+ */
+static int pxa_udc_resume(struct platform_device *_dev)
+{
+	int i;
+	struct pxa_udc *udc = platform_get_drvdata(_dev);
+	struct pxa_ep *ep;
+
+	ep = &udc->pxa_ep[0];
+	udc_ep_writel(ep, UDCCSR, udc->udccsr0 & (UDCCSR0_FST | UDCCSR0_DME));
+	for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
+		ep = &udc->pxa_ep[i];
+		udc_ep_writel(ep, UDCCSR, ep->udccsr_value);
+		udc_ep_writel(ep, UDCCR,  ep->udccr_value);
+		ep_dbg(ep, "udccsr:0x%03x, udccr:0x%x\n",
+				ep->udccsr_value, ep->udccr_value);
+	}
+
+	dplus_pullup(udc, udc->pullup_resume);
+	if (should_enable_udc(udc))
+		udc_enable(udc);
+	/*
+	 * We do not handle OTG yet.
+	 *
+	 * OTGPH bit is set when sleep mode is entered.
+	 * it indicates that OTG pad is retaining its state.
+	 * Upon exit from sleep mode and before clearing OTGPH,
+	 * Software must configure the USB OTG pad, UDC, and UHC
+	 * to the state they were in before entering sleep mode.
+	 */
+	pxa27x_clear_otgph();
+
+	return 0;
+}
+#endif
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:pxa27x-udc");
+
+static struct platform_driver udc_driver = {
+	.driver		= {
+		.name	= "pxa27x-udc",
+		.owner	= THIS_MODULE,
+	},
+	.remove		= __exit_p(pxa_udc_remove),
+	.shutdown	= pxa_udc_shutdown,
+#ifdef CONFIG_PM
+	.suspend	= pxa_udc_suspend,
+	.resume		= pxa_udc_resume
+#endif
+};
+
+static int __init udc_init(void)
+{
+	if (!cpu_is_pxa27x() && !cpu_is_pxa3xx())
+		return -ENODEV;
+
+	printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
+	return platform_driver_probe(&udc_driver, pxa_udc_probe);
+}
+module_init(udc_init);
+
+
+static void __exit udc_exit(void)
+{
+	platform_driver_unregister(&udc_driver);
+}
+module_exit(udc_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Robert Jarzmik");
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pxa27x_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pxa27x_udc.h
new file mode 100644
index 0000000..a1d268c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/pxa27x_udc.h
@@ -0,0 +1,496 @@
+/*
+ * linux/drivers/usb/gadget/pxa27x_udc.h
+ * Intel PXA27x on-chip full speed USB device controller
+ *
+ * Inspired by original driver by Frank Becker, David Brownell, and others.
+ * Copyright (C) 2008 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_USB_GADGET_PXA27X_H
+#define __LINUX_USB_GADGET_PXA27X_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/usb/otg.h>
+
+/*
+ * Register definitions
+ */
+/* Offsets */
+#define UDCCR		0x0000		/* UDC Control Register */
+#define UDCICR0		0x0004		/* UDC Interrupt Control Register0 */
+#define UDCICR1		0x0008		/* UDC Interrupt Control Register1 */
+#define UDCISR0		0x000C		/* UDC Interrupt Status Register 0 */
+#define UDCISR1		0x0010		/* UDC Interrupt Status Register 1 */
+#define UDCFNR		0x0014		/* UDC Frame Number Register */
+#define UDCOTGICR	0x0018		/* UDC On-The-Go interrupt control */
+#define UP2OCR		0x0020		/* USB Port 2 Output Control register */
+#define UP3OCR		0x0024		/* USB Port 3 Output Control register */
+#define UDCCSRn(x)	(0x0100 + ((x)<<2)) /* UDC Control/Status register */
+#define UDCBCRn(x)	(0x0200 + ((x)<<2)) /* UDC Byte Count Register */
+#define UDCDRn(x)	(0x0300 + ((x)<<2)) /* UDC Data Register  */
+#define UDCCRn(x)	(0x0400 + ((x)<<2)) /* UDC Control Register */
+
+#define UDCCR_OEN	(1 << 31)	/* On-the-Go Enable */
+#define UDCCR_AALTHNP	(1 << 30)	/* A-device Alternate Host Negotiation
+					   Protocol Port Support */
+#define UDCCR_AHNP	(1 << 29)	/* A-device Host Negotiation Protocol
+					   Support */
+#define UDCCR_BHNP	(1 << 28)	/* B-device Host Negotiation Protocol
+					   Enable */
+#define UDCCR_DWRE	(1 << 16)	/* Device Remote Wake-up Enable */
+#define UDCCR_ACN	(0x03 << 11)	/* Active UDC configuration Number */
+#define UDCCR_ACN_S	11
+#define UDCCR_AIN	(0x07 << 8)	/* Active UDC interface Number */
+#define UDCCR_AIN_S	8
+#define UDCCR_AAISN	(0x07 << 5)	/* Active UDC Alternate Interface
+					   Setting Number */
+#define UDCCR_AAISN_S	5
+#define UDCCR_SMAC	(1 << 4)	/* Switch Endpoint Memory to Active
+					   Configuration */
+#define UDCCR_EMCE	(1 << 3)	/* Endpoint Memory Configuration
+					   Error */
+#define UDCCR_UDR	(1 << 2)	/* UDC Resume */
+#define UDCCR_UDA	(1 << 1)	/* UDC Active */
+#define UDCCR_UDE	(1 << 0)	/* UDC Enable */
+
+#define UDCICR_INT(n, intr) (((intr) & 0x03) << (((n) & 0x0F) * 2))
+#define UDCICR1_IECC	(1 << 31)	/* IntEn - Configuration Change */
+#define UDCICR1_IESOF	(1 << 30)	/* IntEn - Start of Frame */
+#define UDCICR1_IERU	(1 << 29)	/* IntEn - Resume */
+#define UDCICR1_IESU	(1 << 28)	/* IntEn - Suspend */
+#define UDCICR1_IERS	(1 << 27)	/* IntEn - Reset */
+#define UDCICR_FIFOERR	(1 << 1)	/* FIFO Error interrupt for EP */
+#define UDCICR_PKTCOMPL	(1 << 0)	/* Packet Complete interrupt for EP */
+#define UDCICR_INT_MASK	(UDCICR_FIFOERR | UDCICR_PKTCOMPL)
+
+#define UDCISR_INT(n, intr) (((intr) & 0x03) << (((n) & 0x0F) * 2))
+#define UDCISR1_IRCC	(1 << 31)	/* IntReq - Configuration Change */
+#define UDCISR1_IRSOF	(1 << 30)	/* IntReq - Start of Frame */
+#define UDCISR1_IRRU	(1 << 29)	/* IntReq - Resume */
+#define UDCISR1_IRSU	(1 << 28)	/* IntReq - Suspend */
+#define UDCISR1_IRRS	(1 << 27)	/* IntReq - Reset */
+#define UDCISR_INT_MASK	(UDCICR_FIFOERR | UDCICR_PKTCOMPL)
+
+#define UDCOTGICR_IESF	(1 << 24)	/* OTG SET_FEATURE command recvd */
+#define UDCOTGICR_IEXR	(1 << 17)	/* Extra Transceiver Interrupt
+					   Rising Edge Interrupt Enable */
+#define UDCOTGICR_IEXF	(1 << 16)	/* Extra Transceiver Interrupt
+					   Falling Edge Interrupt Enable */
+#define UDCOTGICR_IEVV40R (1 << 9)	/* OTG Vbus Valid 4.0V Rising Edge
+					   Interrupt Enable */
+#define UDCOTGICR_IEVV40F (1 << 8)	/* OTG Vbus Valid 4.0V Falling Edge
+					   Interrupt Enable */
+#define UDCOTGICR_IEVV44R (1 << 7)	/* OTG Vbus Valid 4.4V Rising Edge
+					   Interrupt Enable */
+#define UDCOTGICR_IEVV44F (1 << 6)	/* OTG Vbus Valid 4.4V Falling Edge
+					   Interrupt Enable */
+#define UDCOTGICR_IESVR	(1 << 5)	/* OTG Session Valid Rising Edge
+					   Interrupt Enable */
+#define UDCOTGICR_IESVF	(1 << 4)	/* OTG Session Valid Falling Edge
+					   Interrupt Enable */
+#define UDCOTGICR_IESDR	(1 << 3)	/* OTG A-Device SRP Detect Rising
+					   Edge Interrupt Enable */
+#define UDCOTGICR_IESDF	(1 << 2)	/* OTG A-Device SRP Detect Falling
+					   Edge Interrupt Enable */
+#define UDCOTGICR_IEIDR	(1 << 1)	/* OTG ID Change Rising Edge
+					   Interrupt Enable */
+#define UDCOTGICR_IEIDF	(1 << 0)	/* OTG ID Change Falling Edge
+					   Interrupt Enable */
+
+/* Host Port 2 field bits */
+#define UP2OCR_CPVEN	(1 << 0)	/* Charge Pump Vbus Enable */
+#define UP2OCR_CPVPE	(1 << 1)	/* Charge Pump Vbus Pulse Enable */
+					/* Transceiver enablers */
+#define UP2OCR_DPPDE	(1 << 2)	/*   D+ Pull Down Enable */
+#define UP2OCR_DMPDE	(1 << 3)	/*   D- Pull Down Enable */
+#define UP2OCR_DPPUE	(1 << 4)	/*   D+ Pull Up Enable */
+#define UP2OCR_DMPUE	(1 << 5)	/*   D- Pull Up Enable */
+#define UP2OCR_DPPUBE	(1 << 6)	/*   D+ Pull Up Bypass Enable */
+#define UP2OCR_DMPUBE	(1 << 7)	/*   D- Pull Up Bypass Enable */
+#define UP2OCR_EXSP	(1 << 8)	/* External Transceiver Speed Control */
+#define UP2OCR_EXSUS	(1 << 9)	/* External Transceiver Speed Enable */
+#define UP2OCR_IDON	(1 << 10)	/* OTG ID Read Enable */
+#define UP2OCR_HXS	(1 << 16)	/* Transceiver Output Select */
+#define UP2OCR_HXOE	(1 << 17)	/* Transceiver Output Enable */
+#define UP2OCR_SEOS	(1 << 24)	/* Single-Ended Output Select */
+
+#define UDCCSR0_ACM	(1 << 9)	/* Ack Control Mode */
+#define UDCCSR0_AREN	(1 << 8)	/* Ack Response Enable */
+#define UDCCSR0_SA	(1 << 7)	/* Setup Active */
+#define UDCCSR0_RNE	(1 << 6)	/* Receive FIFO Not Empty */
+#define UDCCSR0_FST	(1 << 5)	/* Force Stall */
+#define UDCCSR0_SST	(1 << 4)	/* Sent Stall */
+#define UDCCSR0_DME	(1 << 3)	/* DMA Enable */
+#define UDCCSR0_FTF	(1 << 2)	/* Flush Transmit FIFO */
+#define UDCCSR0_IPR	(1 << 1)	/* IN Packet Ready */
+#define UDCCSR0_OPC	(1 << 0)	/* OUT Packet Complete */
+
+#define UDCCSR_DPE	(1 << 9)	/* Data Packet Error */
+#define UDCCSR_FEF	(1 << 8)	/* Flush Endpoint FIFO */
+#define UDCCSR_SP	(1 << 7)	/* Short Packet Control/Status */
+#define UDCCSR_BNE	(1 << 6)	/* Buffer Not Empty (IN endpoints) */
+#define UDCCSR_BNF	(1 << 6)	/* Buffer Not Full (OUT endpoints) */
+#define UDCCSR_FST	(1 << 5)	/* Force STALL */
+#define UDCCSR_SST	(1 << 4)	/* Sent STALL */
+#define UDCCSR_DME	(1 << 3)	/* DMA Enable */
+#define UDCCSR_TRN	(1 << 2)	/* Tx/Rx NAK */
+#define UDCCSR_PC	(1 << 1)	/* Packet Complete */
+#define UDCCSR_FS	(1 << 0)	/* FIFO needs service */
+
+#define UDCCONR_CN	(0x03 << 25)	/* Configuration Number */
+#define UDCCONR_CN_S	25
+#define UDCCONR_IN	(0x07 << 22)	/* Interface Number */
+#define UDCCONR_IN_S	22
+#define UDCCONR_AISN	(0x07 << 19)	/* Alternate Interface Number */
+#define UDCCONR_AISN_S	19
+#define UDCCONR_EN	(0x0f << 15)	/* Endpoint Number */
+#define UDCCONR_EN_S	15
+#define UDCCONR_ET	(0x03 << 13)	/* Endpoint Type: */
+#define UDCCONR_ET_S	13
+#define UDCCONR_ET_INT	(0x03 << 13)	/*   Interrupt */
+#define UDCCONR_ET_BULK	(0x02 << 13)	/*   Bulk */
+#define UDCCONR_ET_ISO	(0x01 << 13)	/*   Isochronous */
+#define UDCCONR_ET_NU	(0x00 << 13)	/*   Not used */
+#define UDCCONR_ED	(1 << 12)	/* Endpoint Direction */
+#define UDCCONR_MPS	(0x3ff << 2)	/* Maximum Packet Size */
+#define UDCCONR_MPS_S	2
+#define UDCCONR_DE	(1 << 1)	/* Double Buffering Enable */
+#define UDCCONR_EE	(1 << 0)	/* Endpoint Enable */
+
+#define UDCCR_MASK_BITS (UDCCR_OEN | UDCCR_SMAC | UDCCR_UDR | UDCCR_UDE)
+#define UDCCSR_WR_MASK	(UDCCSR_DME | UDCCSR_FST)
+#define UDC_FNR_MASK	(0x7ff)
+#define UDC_BCR_MASK	(0x3ff)
+
+/*
+ * UDCCR = UDC Endpoint Configuration Registers
+ * UDCCSR = UDC Control/Status Register for this EP
+ * UDCBCR = UDC Byte Count Remaining (contents of OUT fifo)
+ * UDCDR = UDC Endpoint Data Register (the fifo)
+ */
+#define ofs_UDCCR(ep)	(UDCCRn(ep->idx))
+#define ofs_UDCCSR(ep)	(UDCCSRn(ep->idx))
+#define ofs_UDCBCR(ep)	(UDCBCRn(ep->idx))
+#define ofs_UDCDR(ep)	(UDCDRn(ep->idx))
+
+/* Register access macros */
+#define udc_ep_readl(ep, reg)	\
+	__raw_readl((ep)->dev->regs + ofs_##reg(ep))
+#define udc_ep_writel(ep, reg, value)	\
+	__raw_writel((value), ep->dev->regs + ofs_##reg(ep))
+#define udc_ep_readb(ep, reg)	\
+	__raw_readb((ep)->dev->regs + ofs_##reg(ep))
+#define udc_ep_writeb(ep, reg, value)	\
+	__raw_writeb((value), ep->dev->regs + ofs_##reg(ep))
+#define udc_readl(dev, reg)	\
+	__raw_readl((dev)->regs + (reg))
+#define udc_writel(udc, reg, value)	\
+	__raw_writel((value), (udc)->regs + (reg))
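+/*
+ * Illustrative expansion (follows directly from the definitions above): the
+ * ofs_##reg token pasting picks the per-endpoint offset macro, so
+ *
+ *	udc_ep_readl(ep, UDCCSR)
+ *
+ * expands to
+ *
+ *	__raw_readl((ep)->dev->regs + UDCCSRn(ep->idx))
+ *
+ * i.e. a 32-bit read of the control/status register of the pxa endpoint
+ * whose hardware index is ep->idx.
+ */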
+
+#define UDCCSR_MASK		(UDCCSR_FST | UDCCSR_DME)
+#define UDCCISR0_EP_MASK	~0
+#define UDCCISR1_EP_MASK	0xffff
+#define UDCCSR0_CTRL_REQ_MASK	(UDCCSR0_OPC | UDCCSR0_SA | UDCCSR0_RNE)
+
+#define EPIDX(ep)	(ep->idx)
+#define EPADDR(ep)	(ep->addr)
+#define EPXFERTYPE(ep)	(ep->type)
+#define EPNAME(ep)	(ep->name)
+#define is_ep0(ep)	(!ep->idx)
+#define EPXFERTYPE_is_ISO(ep) (EPXFERTYPE(ep) == USB_ENDPOINT_XFER_ISOC)
+
+/*
+ * Endpoint definitions
+ *
+ * Once enabled, the pxa endpoint configuration is frozen and cannot change
+ * unless a reset happens or the udc is disabled.
+ * Therefore, we must define all potential pxa endpoint definitions needed for
+ * any gadget and set them up before the udc is enabled.
+ *
+ * As the architecture chosen is fully static, meaning the pxa endpoint
+ * configurations are set up once and for all, we must provide a way to match
+ * one usb endpoint (usb_ep) to several pxa endpoints. The reason is that gadget
+ * layer autoconf doesn't choose the usb_ep endpoint on (config, interface, alt)
+ * criteria, while the pxa architecture requires that.
+ *
+ * The solution is to define several pxa endpoints matching one usb_ep. Ex:
+ *   - "ep1-in" matches pxa endpoint EPA (which is an IN ep at addr 1, when
+ *     the udc talks on (config=3, interface=0, alt=0)
+ *   - "ep1-in" matches pxa endpoint EPB (which is an IN ep at addr 1, when
+ *     the udc talks on (config=3, interface=0, alt=1)
+ *   - "ep1-in" matches pxa endpoint EPC (which is an IN ep at addr 1, when
+ *     the udc talks on (config=2, interface=0, alt=0)
+ *
+ * We'll define the pxa endpoint by its index (EPA => idx=1, EPB => idx=2, ...)
+ */
+
+/*
+ * Endpoint definition helpers
+ */
+#define USB_EP_DEF(addr, bname, dir, type, maxpkt) \
+{ .usb_ep = { .name = bname, .ops = &pxa_ep_ops, .maxpacket = maxpkt, }, \
+  .desc = {	.bEndpointAddress = addr | (dir ? USB_DIR_IN : 0), \
+		.bmAttributes = type, \
+		.wMaxPacketSize = maxpkt, }, \
+  .dev = &memory \
+}
+#define USB_EP_BULK(addr, bname, dir) \
+  USB_EP_DEF(addr, bname, dir, USB_ENDPOINT_XFER_BULK, BULK_FIFO_SIZE)
+#define USB_EP_ISO(addr, bname, dir) \
+  USB_EP_DEF(addr, bname, dir, USB_ENDPOINT_XFER_ISOC, ISO_FIFO_SIZE)
+#define USB_EP_INT(addr, bname, dir) \
+  USB_EP_DEF(addr, bname, dir, USB_ENDPOINT_XFER_INT, INT_FIFO_SIZE)
+#define USB_EP_IN_BULK(n)	USB_EP_BULK(n, "ep" #n "in-bulk", 1)
+#define USB_EP_OUT_BULK(n)	USB_EP_BULK(n, "ep" #n "out-bulk", 0)
+#define USB_EP_IN_ISO(n)	USB_EP_ISO(n,  "ep" #n "in-iso", 1)
+#define USB_EP_OUT_ISO(n)	USB_EP_ISO(n,  "ep" #n "out-iso", 0)
+#define USB_EP_IN_INT(n)	USB_EP_INT(n,  "ep" #n "in-int", 1)
+#define USB_EP_CTRL		USB_EP_DEF(0,  "ep0", 0, 0, EP0_FIFO_SIZE)
+
+#define PXA_EP_DEF(_idx, _addr, dir, _type, maxpkt, _config, iface, altset) \
+{ \
+	.dev = &memory, \
+	.name = "ep" #_idx, \
+	.idx = _idx, .enabled = 0, \
+	.dir_in = dir, .addr = _addr, \
+	.config = _config, .interface = iface, .alternate = altset, \
+	.type = _type, .fifo_size = maxpkt, \
+}
+#define PXA_EP_BULK(_idx, addr, dir, config, iface, alt) \
+  PXA_EP_DEF(_idx, addr, dir, USB_ENDPOINT_XFER_BULK, BULK_FIFO_SIZE, \
+		config, iface, alt)
+#define PXA_EP_ISO(_idx, addr, dir, config, iface, alt) \
+  PXA_EP_DEF(_idx, addr, dir, USB_ENDPOINT_XFER_ISOC, ISO_FIFO_SIZE, \
+		config, iface, alt)
+#define PXA_EP_INT(_idx, addr, dir, config, iface, alt) \
+  PXA_EP_DEF(_idx, addr, dir, USB_ENDPOINT_XFER_INT, INT_FIFO_SIZE, \
+		config, iface, alt)
+#define PXA_EP_IN_BULK(i, adr, c, f, a)		PXA_EP_BULK(i, adr, 1, c, f, a)
+#define PXA_EP_OUT_BULK(i, adr, c, f, a)	PXA_EP_BULK(i, adr, 0, c, f, a)
+#define PXA_EP_IN_ISO(i, adr, c, f, a)		PXA_EP_ISO(i, adr, 1, c, f, a)
+#define PXA_EP_OUT_ISO(i, adr, c, f, a)		PXA_EP_ISO(i, adr, 0, c, f, a)
+#define PXA_EP_IN_INT(i, adr, c, f, a)		PXA_EP_INT(i, adr, 1, c, f, a)
+#define PXA_EP_CTRL	PXA_EP_DEF(0, 0, 0, 0, EP0_FIFO_SIZE, 0, 0, 0)
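+/*
+ * Illustrative sketch only (the real tables live in the driver .c file, which
+ * must provide the "memory" pxa_udc instance and pxa_ep_ops referenced by the
+ * helpers above): the macros are meant to build the two static endpoint
+ * arrays, e.g. the "ep1-in" example from the comment above could become
+ *
+ *	.udc_usb_ep = { USB_EP_CTRL, USB_EP_IN_BULK(1), ... },
+ *	.pxa_ep = {
+ *		PXA_EP_CTRL,
+ *		PXA_EP_IN_BULK(1, 1, 3, 0, 0),	(epA: addr 1, config 3, iface 0, alt 0)
+ *		PXA_EP_IN_BULK(2, 1, 3, 0, 1),	(epB: addr 1, config 3, iface 0, alt 1)
+ *		...
+ *	},
+ */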
+
+struct pxa27x_udc;
+
+struct stats {
+	unsigned long in_ops;
+	unsigned long out_ops;
+	unsigned long in_bytes;
+	unsigned long out_bytes;
+	unsigned long irqs;
+};
+
+/**
+ * struct udc_usb_ep - container of each usb_ep structure
+ * @usb_ep: usb endpoint
+ * @desc: usb descriptor, especially type and address
+ * @dev: udc managing this endpoint
+ * @pxa_ep: matching pxa_ep (cache of find_pxa_ep() call)
+ */
+struct udc_usb_ep {
+	struct usb_ep usb_ep;
+	struct usb_endpoint_descriptor desc;
+	struct pxa_udc *dev;
+	struct pxa_ep *pxa_ep;
+};
+
+/**
+ * struct pxa_ep - pxa endpoint
+ * @dev: udc device
+ * @queue: requests queue
+ * @lock: lock to pxa_ep data (queues and stats)
+ * @enabled: true when endpoint enabled (not stopped by gadget layer)
+ * @in_handle_ep: number of recursions of handle_ep() function
+ * 	Prevents deadlocks or infinite recursions of the form:
+ *	  irq->handle_ep()->req_done()->req.complete()->pxa_ep_queue()->handle_ep()
+ *      or
+ *        pxa_ep_queue()->handle_ep()->req_done()->req.complete()->pxa_ep_queue()
+ * @idx: endpoint index (1 => epA, 2 => epB, ..., 24 => epX)
+ * @name: endpoint name (for trace/debug purpose)
+ * @dir_in: 1 if IN endpoint, 0 if OUT endpoint
+ * @addr: usb endpoint number
+ * @config: configuration in which this endpoint is active
+ * @interface: interface in which this endpoint is active
+ * @alternate: altsetting in which this endpoint is active
+ * @fifo_size: max packet size in the endpoint fifo
+ * @type: endpoint type (bulk, iso, int, ...)
+ * @udccsr_value: save register of UDCCSR0 for suspend/resume
+ * @udccr_value: save register of UDCCR for suspend/resume
+ * @stats: endpoint statistics
+ *
+ * The *PROBLEM* is that pxa's endpoint configuration scheme is both misdesigned
+ * (cares about config/interface/altsetting, thus placing needless limits on
+ * device capability) and full of implementation bugs forcing it to be set up
+ * for use more or less like a pxa255.
+ *
+ * As we define the pxa_ep statically, we must guess all of the pxa_ep needed
+ * by every gadget which may work with this udc driver.
+ */
+struct pxa_ep {
+	struct pxa_udc		*dev;
+
+	struct list_head	queue;
+	spinlock_t		lock;		/* Protects this structure */
+						/* (queues, stats) */
+	unsigned		enabled:1;
+	unsigned		in_handle_ep:1;
+
+	unsigned		idx:5;
+	char			*name;
+
+	/*
+	 * Specific pxa endpoint data, needed for hardware initialization
+	 */
+	unsigned		dir_in:1;
+	unsigned		addr:4;
+	unsigned		config:2;
+	unsigned		interface:3;
+	unsigned		alternate:3;
+	unsigned		fifo_size;
+	unsigned		type;
+
+#ifdef CONFIG_PM
+	u32			udccsr_value;
+	u32			udccr_value;
+#endif
+	struct stats		stats;
+};
+
+/**
+ * struct pxa27x_request - container of each usb_request structure
+ * @req: usb request
+ * @udc_usb_ep: usb endpoint the request was submitted on
+ * @in_use: sanity check if request already queued on a pxa_ep
+ * @queue: linked list of requests, linked on pxa_ep->queue
+ */
+struct pxa27x_request {
+	struct usb_request			req;
+	struct udc_usb_ep			*udc_usb_ep;
+	unsigned				in_use:1;
+	struct list_head			queue;
+};
+
+enum ep0_state {
+	WAIT_FOR_SETUP,
+	SETUP_STAGE,
+	IN_DATA_STAGE,
+	OUT_DATA_STAGE,
+	IN_STATUS_STAGE,
+	OUT_STATUS_STAGE,
+	STALL,
+	WAIT_ACK_SET_CONF_INTERF
+};
+
+static char *ep0_state_name[] = {
+	"WAIT_FOR_SETUP", "SETUP_STAGE", "IN_DATA_STAGE", "OUT_DATA_STAGE",
+	"IN_STATUS_STAGE", "OUT_STATUS_STAGE", "STALL",
+	"WAIT_ACK_SET_CONF_INTERF"
+};
+#define EP0_STNAME(udc) ep0_state_name[(udc)->ep0state]
+
+#define EP0_FIFO_SIZE	16U
+#define BULK_FIFO_SIZE	64U
+#define ISO_FIFO_SIZE	256U
+#define INT_FIFO_SIZE	16U
+
+struct udc_stats {
+	unsigned long	irqs_reset;
+	unsigned long	irqs_suspend;
+	unsigned long	irqs_resume;
+	unsigned long	irqs_reconfig;
+};
+
+#define NR_USB_ENDPOINTS (1 + 5)	/* ep0 + ep1in-bulk + .. + ep3in-iso */
+#define NR_PXA_ENDPOINTS (1 + 14)	/* ep0 + epA + epB + .. + epX */
+
+/**
+ * struct pxa_udc - udc structure
+ * @regs: mapped IO space
+ * @irq: udc irq
+ * @clk: udc clock
+ * @gadget: udc gadget structure
+ * @driver: bound gadget (zero, g_ether, g_file_storage, ...)
+ * @dev: device
+ * @mach: machine info, used to activate specific GPIO
+ * @transceiver: external transceiver to handle vbus sense and D+ pullup
+ * @ep0state: control endpoint state machine state
+ * @stats: statistics on udc usage
+ * @udc_usb_ep: array of usb endpoints offered by the gadget
+ * @pxa_ep: array of pxa available endpoints
+ * @enabled: UDC was enabled by a previous udc_enable()
+ * @pullup_on: if pullup resistor connected to D+ pin
+ * @pullup_resume: if pullup resistor should be connected to D+ pin on resume
+ * @vbus_sensed: 1 if VBUS presence has been sensed
+ * @config: UDC active configuration
+ * @last_interface: UDC interface of the last SET_INTERFACE host request
+ * @last_alternate: UDC altsetting of the last SET_INTERFACE host request
+ * @udccsr0: save of udccsr0 in case of suspend
+ * @debugfs_root: root entry of debug filesystem
+ * @debugfs_state: debugfs entry for "udcstate"
+ * @debugfs_queues: debugfs entry for "queues"
+ * @debugfs_eps: debugfs entry for "epstate"
+ */
+struct pxa_udc {
+	void __iomem				*regs;
+	int					irq;
+	struct clk				*clk;
+
+	struct usb_gadget			gadget;
+	struct usb_gadget_driver		*driver;
+	struct device				*dev;
+	struct pxa2xx_udc_mach_info		*mach;
+	struct usb_phy				*transceiver;
+
+	enum ep0_state				ep0state;
+	struct udc_stats			stats;
+
+	struct udc_usb_ep			udc_usb_ep[NR_USB_ENDPOINTS];
+	struct pxa_ep				pxa_ep[NR_PXA_ENDPOINTS];
+
+	unsigned				enabled:1;
+	unsigned				pullup_on:1;
+	unsigned				pullup_resume:1;
+	unsigned				vbus_sensed:1;
+	unsigned				config:2;
+	unsigned				last_interface:3;
+	unsigned				last_alternate:3;
+
+#ifdef CONFIG_PM
+	unsigned				udccsr0;
+#endif
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+	struct dentry				*debugfs_root;
+	struct dentry				*debugfs_state;
+	struct dentry				*debugfs_queues;
+	struct dentry				*debugfs_eps;
+#endif
+};
+
+static inline struct pxa_udc *to_gadget_udc(struct usb_gadget *gadget)
+{
+	return container_of(gadget, struct pxa_udc, gadget);
+}
+
+/*
+ * Debugging/message support
+ */
+#define ep_dbg(ep, fmt, arg...) \
+	dev_dbg(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
+#define ep_vdbg(ep, fmt, arg...) \
+	dev_vdbg(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
+#define ep_err(ep, fmt, arg...) \
+	dev_err(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
+#define ep_info(ep, fmt, arg...) \
+	dev_info(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
+#define ep_warn(ep, fmt, arg...) \
+	dev_warn(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
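+/*
+ * Usage example (illustrative): the wrappers above prefix every message with
+ * the endpoint name and the calling function, so
+ *
+ *	ep_dbg(ep, "queued %d bytes\n", len);
+ *
+ * ends up in dev_dbg() on the udc device as "<epname>:<function>: queued ...".
+ */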
+
+#endif /* __LINUX_USB_GADGET_PXA27X_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/r8a66597-udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/r8a66597-udc.c
new file mode 100644
index 0000000..c4401e7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/r8a66597-udc.c
@@ -0,0 +1,2043 @@
+/*
+ * R8A66597 UDC (USB gadget)
+ *
+ * Copyright (C) 2006-2009 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "r8a66597-udc.h"
+
+#define DRIVER_VERSION	"2011-09-26"
+
+static const char udc_name[] = "r8a66597_udc";
+static const char *r8a66597_ep_name[] = {
+	"ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7",
+	"ep8", "ep9",
+};
+
+static void init_controller(struct r8a66597 *r8a66597);
+static void disable_controller(struct r8a66597 *r8a66597);
+static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
+static void irq_packet_write(struct r8a66597_ep *ep,
+				struct r8a66597_request *req);
+static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
+			gfp_t gfp_flags);
+
+static void transfer_complete(struct r8a66597_ep *ep,
+		struct r8a66597_request *req, int status);
+
+/*-------------------------------------------------------------------------*/
+static inline u16 get_usb_speed(struct r8a66597 *r8a66597)
+{
+	return r8a66597_read(r8a66597, DVSTCTR0) & RHST;
+}
+
+static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
+		unsigned long reg)
+{
+	u16 tmp;
+
+	tmp = r8a66597_read(r8a66597, INTENB0);
+	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
+			INTENB0);
+	r8a66597_bset(r8a66597, (1 << pipenum), reg);
+	r8a66597_write(r8a66597, tmp, INTENB0);
+}
+
+static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
+		unsigned long reg)
+{
+	u16 tmp;
+
+	tmp = r8a66597_read(r8a66597, INTENB0);
+	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
+			INTENB0);
+	r8a66597_bclr(r8a66597, (1 << pipenum), reg);
+	r8a66597_write(r8a66597, tmp, INTENB0);
+}
+
+static void r8a66597_usb_connect(struct r8a66597 *r8a66597)
+{
+	r8a66597_bset(r8a66597, CTRE, INTENB0);
+	r8a66597_bset(r8a66597, BEMPE | BRDYE, INTENB0);
+
+	r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
+}
+
+static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
+	r8a66597_bclr(r8a66597, CTRE, INTENB0);
+	r8a66597_bclr(r8a66597, BEMPE | BRDYE, INTENB0);
+	r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
+
+	r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
+	spin_unlock(&r8a66597->lock);
+	r8a66597->driver->disconnect(&r8a66597->gadget);
+	spin_lock(&r8a66597->lock);
+
+	disable_controller(r8a66597);
+	init_controller(r8a66597);
+	r8a66597_bset(r8a66597, VBSE, INTENB0);
+	INIT_LIST_HEAD(&r8a66597->ep[0].queue);
+}
+
+static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	u16 pid = 0;
+	unsigned long offset;
+
+	if (pipenum == 0) {
+		pid = r8a66597_read(r8a66597, DCPCTR) & PID;
+	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+		offset = get_pipectr_addr(pipenum);
+		pid = r8a66597_read(r8a66597, offset) & PID;
+	} else {
+		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
+			pipenum);
+	}
+
+	return pid;
+}
+
+static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
+		u16 pid)
+{
+	unsigned long offset;
+
+	if (pipenum == 0) {
+		r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
+	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+		offset = get_pipectr_addr(pipenum);
+		r8a66597_mdfy(r8a66597, pid, PID, offset);
+	} else {
+		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
+			pipenum);
+	}
+}
+
+static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	control_reg_set_pid(r8a66597, pipenum, PID_BUF);
+}
+
+static inline void pipe_stop(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	control_reg_set_pid(r8a66597, pipenum, PID_NAK);
+}
+
+static inline void pipe_stall(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	control_reg_set_pid(r8a66597, pipenum, PID_STALL);
+}
+
+static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	u16 ret = 0;
+	unsigned long offset;
+
+	if (pipenum == 0) {
+		ret = r8a66597_read(r8a66597, DCPCTR);
+	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+		offset = get_pipectr_addr(pipenum);
+		ret = r8a66597_read(r8a66597, offset);
+	} else {
+		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
+			pipenum);
+	}
+
+	return ret;
+}
+
+static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	unsigned long offset;
+
+	pipe_stop(r8a66597, pipenum);
+
+	if (pipenum == 0) {
+		r8a66597_bset(r8a66597, SQCLR, DCPCTR);
+	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+		offset = get_pipectr_addr(pipenum);
+		r8a66597_bset(r8a66597, SQCLR, offset);
+	} else {
+		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
+			pipenum);
+	}
+}
+
+static void control_reg_sqset(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	unsigned long offset;
+
+	pipe_stop(r8a66597, pipenum);
+
+	if (pipenum == 0) {
+		r8a66597_bset(r8a66597, SQSET, DCPCTR);
+	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+		offset = get_pipectr_addr(pipenum);
+		r8a66597_bset(r8a66597, SQSET, offset);
+	} else {
+		dev_err(r8a66597_to_dev(r8a66597),
+			"unexpected pipe num (%d)\n", pipenum);
+	}
+}
+
+static u16 control_reg_sqmon(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	unsigned long offset;
+
+	if (pipenum == 0) {
+		return r8a66597_read(r8a66597, DCPCTR) & SQMON;
+	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+		offset = get_pipectr_addr(pipenum);
+		return r8a66597_read(r8a66597, offset) & SQMON;
+	} else {
+		dev_err(r8a66597_to_dev(r8a66597),
+			"unexpected pipe num (%d)\n", pipenum);
+	}
+
+	return 0;
+}
+
+static u16 save_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	return control_reg_sqmon(r8a66597, pipenum);
+}
+
+static void restore_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum,
+			       u16 toggle)
+{
+	if (toggle)
+		control_reg_sqset(r8a66597, pipenum);
+	else
+		control_reg_sqclr(r8a66597, pipenum);
+}
+
+static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	u16 tmp;
+	int size;
+
+	if (pipenum == 0) {
+		tmp = r8a66597_read(r8a66597, DCPCFG);
+		if ((tmp & R8A66597_CNTMD) != 0)
+			size = 256;
+		else {
+			tmp = r8a66597_read(r8a66597, DCPMAXP);
+			size = tmp & MAXP;
+		}
+	} else {
+		r8a66597_write(r8a66597, pipenum, PIPESEL);
+		tmp = r8a66597_read(r8a66597, PIPECFG);
+		if ((tmp & R8A66597_CNTMD) != 0) {
+			tmp = r8a66597_read(r8a66597, PIPEBUF);
+			size = ((tmp >> 10) + 1) * 64;
+		} else {
+			tmp = r8a66597_read(r8a66597, PIPEMAXP);
+			size = tmp & MXPS;
+		}
+	}
+
+	return size;
+}
+
+static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
+{
+	if (r8a66597->pdata->on_chip)
+		return MBW_32;
+	else
+		return MBW_16;
+}
+
+static void r8a66597_change_curpipe(struct r8a66597 *r8a66597, u16 pipenum,
+				    u16 isel, u16 fifosel)
+{
+	u16 tmp, mask, loop;
+	int i = 0;
+
+	if (!pipenum) {
+		mask = ISEL | CURPIPE;
+		loop = isel;
+	} else {
+		mask = CURPIPE;
+		loop = pipenum;
+	}
+	r8a66597_mdfy(r8a66597, loop, mask, fifosel);
+
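+	/*
+	 * Wait until FIFOSEL actually reflects the requested pipe (and ISEL
+	 * for pipe 0); give up with an error after about 10^6 polls.
+	 */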
+	do {
+		tmp = r8a66597_read(r8a66597, fifosel);
+		if (i++ > 1000000) {
+			dev_err(r8a66597_to_dev(r8a66597),
+				"r8a66597: register %x, loop %x "
+				"timed out\n", fifosel, loop);
+			break;
+		}
+		ndelay(1);
+	} while ((tmp & mask) != loop);
+}
+
+static inline void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
+
+	if (ep->use_dma)
+		r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
+
+	r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);
+
+	ndelay(450);
+
+	if (r8a66597_is_sudmac(r8a66597) && ep->use_dma)
+		r8a66597_bclr(r8a66597, mbw_value(r8a66597), ep->fifosel);
+	else
+		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
+
+	if (ep->use_dma)
+		r8a66597_bset(r8a66597, DREQE, ep->fifosel);
+}
+
+static int pipe_buffer_setting(struct r8a66597 *r8a66597,
+		struct r8a66597_pipe_info *info)
+{
+	u16 bufnum = 0, buf_bsize = 0;
+	u16 pipecfg = 0;
+
+	if (info->pipe == 0)
+		return -EINVAL;
+
+	r8a66597_write(r8a66597, info->pipe, PIPESEL);
+
+	if (info->dir_in)
+		pipecfg |= R8A66597_DIR;
+	pipecfg |= info->type;
+	pipecfg |= info->epnum;
+	switch (info->type) {
+	case R8A66597_INT:
+		bufnum = 4 + (info->pipe - R8A66597_BASE_PIPENUM_INT);
+		buf_bsize = 0;
+		break;
+	case R8A66597_BULK:
+		/* isochronous pipes may be used as bulk pipes */
+		if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
+			bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
+		else
+			bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;
+
+		bufnum = R8A66597_BASE_BUFNUM + (bufnum * 16);
+		buf_bsize = 7;
+		pipecfg |= R8A66597_DBLB;
+		if (!info->dir_in)
+			pipecfg |= R8A66597_SHTNAK;
+		break;
+	case R8A66597_ISO:
+		bufnum = R8A66597_BASE_BUFNUM +
+			 (info->pipe - R8A66597_BASE_PIPENUM_ISOC) * 16;
+		buf_bsize = 7;
+		break;
+	}
+
+	if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) {
+		pr_err("r8a66597 pipe memory is insufficient\n");
+		return -ENOMEM;
+	}
+
+	r8a66597_write(r8a66597, pipecfg, PIPECFG);
+	r8a66597_write(r8a66597, (buf_bsize << 10) | (bufnum), PIPEBUF);
+	r8a66597_write(r8a66597, info->maxpacket, PIPEMAXP);
+	if (info->interval)
+		info->interval--;
+	r8a66597_write(r8a66597, info->interval, PIPEPERI);
+
+	return 0;
+}
+
+static void pipe_buffer_release(struct r8a66597 *r8a66597,
+				struct r8a66597_pipe_info *info)
+{
+	if (info->pipe == 0)
+		return;
+
+	if (is_bulk_pipe(info->pipe)) {
+		r8a66597->bulk--;
+	} else if (is_interrupt_pipe(info->pipe)) {
+		r8a66597->interrupt--;
+	} else if (is_isoc_pipe(info->pipe)) {
+		r8a66597->isochronous--;
+		if (info->type == R8A66597_BULK)
+			r8a66597->bulk--;
+	} else {
+		dev_err(r8a66597_to_dev(r8a66597),
+			"ep_release: unexpected pipenum (%d)\n", info->pipe);
+	}
+}
+
+static void pipe_initialize(struct r8a66597_ep *ep)
+{
+	struct r8a66597 *r8a66597 = ep->r8a66597;
+
+	r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel);
+
+	r8a66597_write(r8a66597, ACLRM, ep->pipectr);
+	r8a66597_write(r8a66597, 0, ep->pipectr);
+	r8a66597_write(r8a66597, SQCLR, ep->pipectr);
+	if (ep->use_dma) {
+		r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel);
+
+		ndelay(450);
+
+		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
+	}
+}
+
+static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
+				struct r8a66597_ep *ep,
+				const struct usb_endpoint_descriptor *desc,
+				u16 pipenum, int dma)
+{
+	ep->use_dma = 0;
+	ep->fifoaddr = CFIFO;
+	ep->fifosel = CFIFOSEL;
+	ep->fifoctr = CFIFOCTR;
+
+	ep->pipectr = get_pipectr_addr(pipenum);
+	if (is_bulk_pipe(pipenum) || is_isoc_pipe(pipenum)) {
+		ep->pipetre = get_pipetre_addr(pipenum);
+		ep->pipetrn = get_pipetrn_addr(pipenum);
+	} else {
+		ep->pipetre = 0;
+		ep->pipetrn = 0;
+	}
+	ep->pipenum = pipenum;
+	ep->ep.maxpacket = usb_endpoint_maxp(desc);
+	r8a66597->pipenum2ep[pipenum] = ep;
+	r8a66597->epaddr2ep[desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK]
+		= ep;
+	INIT_LIST_HEAD(&ep->queue);
+}
+
+static void r8a66597_ep_release(struct r8a66597_ep *ep)
+{
+	struct r8a66597 *r8a66597 = ep->r8a66597;
+	u16 pipenum = ep->pipenum;
+
+	if (pipenum == 0)
+		return;
+
+	if (ep->use_dma)
+		r8a66597->num_dma--;
+	ep->pipenum = 0;
+	ep->busy = 0;
+	ep->use_dma = 0;
+}
+
+static int alloc_pipe_config(struct r8a66597_ep *ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct r8a66597 *r8a66597 = ep->r8a66597;
+	struct r8a66597_pipe_info info;
+	int dma = 0;
+	unsigned char *counter;
+	int ret;
+
+	ep->desc = desc;
+
+	if (ep->pipenum)	/* already allocated pipe  */
+		return 0;
+
+	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_BULK:
+		if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
+			if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
+				dev_err(r8a66597_to_dev(r8a66597),
+					"bulk pipe is insufficient\n");
+				return -ENODEV;
+			} else {
+				info.pipe = R8A66597_BASE_PIPENUM_ISOC
+						+ r8a66597->isochronous;
+				counter = &r8a66597->isochronous;
+			}
+		} else {
+			info.pipe = R8A66597_BASE_PIPENUM_BULK + r8a66597->bulk;
+			counter = &r8a66597->bulk;
+		}
+		info.type = R8A66597_BULK;
+		dma = 1;
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
+			dev_err(r8a66597_to_dev(r8a66597),
+				"interrupt pipe is insufficient\n");
+			return -ENODEV;
+		}
+		info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
+		info.type = R8A66597_INT;
+		counter = &r8a66597->interrupt;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
+			dev_err(r8a66597_to_dev(r8a66597),
+				"isochronous pipe is insufficient\n");
+			return -ENODEV;
+		}
+		info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
+		info.type = R8A66597_ISO;
+		counter = &r8a66597->isochronous;
+		break;
+	default:
+		dev_err(r8a66597_to_dev(r8a66597), "unexpected xfer type\n");
+		return -EINVAL;
+	}
+	ep->type = info.type;
+
+	info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+	info.maxpacket = usb_endpoint_maxp(desc);
+	info.interval = desc->bInterval;
+	if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+		info.dir_in = 1;
+	else
+		info.dir_in = 0;
+
+	ret = pipe_buffer_setting(r8a66597, &info);
+	if (ret < 0) {
+		dev_err(r8a66597_to_dev(r8a66597),
+			"pipe_buffer_setting failed\n");
+		return ret;
+	}
+
+	(*counter)++;
+	if ((counter == &r8a66597->isochronous) && info.type == R8A66597_BULK)
+		r8a66597->bulk++;
+
+	r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma);
+	pipe_initialize(ep);
+
+	return 0;
+}
+
+static int free_pipe_config(struct r8a66597_ep *ep)
+{
+	struct r8a66597 *r8a66597 = ep->r8a66597;
+	struct r8a66597_pipe_info info;
+
+	info.pipe = ep->pipenum;
+	info.type = ep->type;
+	pipe_buffer_release(r8a66597, &info);
+	r8a66597_ep_release(ep);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+static void pipe_irq_enable(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	enable_irq_ready(r8a66597, pipenum);
+	enable_irq_nrdy(r8a66597, pipenum);
+}
+
+static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	disable_irq_ready(r8a66597, pipenum);
+	disable_irq_nrdy(r8a66597, pipenum);
+}
+
+/* if ccpl is set, the gadget driver's complete function is not called */
+static void control_end(struct r8a66597 *r8a66597, unsigned ccpl)
+{
+	r8a66597->ep[0].internal_ccpl = ccpl;
+	pipe_start(r8a66597, 0);
+	r8a66597_bset(r8a66597, CCPL, DCPCTR);
+}
+
+static void start_ep0_write(struct r8a66597_ep *ep,
+				struct r8a66597_request *req)
+{
+	struct r8a66597 *r8a66597 = ep->r8a66597;
+
+	pipe_change(r8a66597, ep->pipenum);
+	r8a66597_mdfy(r8a66597, ISEL, (ISEL | CURPIPE), CFIFOSEL);
+	r8a66597_write(r8a66597, BCLR, ep->fifoctr);
+	if (req->req.length == 0) {
+		r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
+		pipe_start(r8a66597, 0);
+		transfer_complete(ep, req, 0);
+	} else {
+		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
+		irq_ep0_write(ep, req);
+	}
+}
+
+static void disable_fifosel(struct r8a66597 *r8a66597, u16 pipenum,
+			    u16 fifosel)
+{
+	u16 tmp;
+
+	tmp = r8a66597_read(r8a66597, fifosel) & CURPIPE;
+	if (tmp == pipenum)
+		r8a66597_change_curpipe(r8a66597, 0, 0, fifosel);
+}
+
+static void change_bfre_mode(struct r8a66597 *r8a66597, u16 pipenum,
+			     int enable)
+{
+	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
+	u16 tmp, toggle;
+
+	/* check current BFRE bit */
+	r8a66597_write(r8a66597, pipenum, PIPESEL);
+	tmp = r8a66597_read(r8a66597, PIPECFG) & R8A66597_BFRE;
+	if ((enable && tmp) || (!enable && !tmp))
+		return;
+
+	/* change BFRE bit */
+	pipe_stop(r8a66597, pipenum);
+	disable_fifosel(r8a66597, pipenum, CFIFOSEL);
+	disable_fifosel(r8a66597, pipenum, D0FIFOSEL);
+	disable_fifosel(r8a66597, pipenum, D1FIFOSEL);
+
+	toggle = save_usb_toggle(r8a66597, pipenum);
+
+	r8a66597_write(r8a66597, pipenum, PIPESEL);
+	if (enable)
+		r8a66597_bset(r8a66597, R8A66597_BFRE, PIPECFG);
+	else
+		r8a66597_bclr(r8a66597, R8A66597_BFRE, PIPECFG);
+
+	/* initialize for internal BFRE flag */
+	r8a66597_bset(r8a66597, ACLRM, ep->pipectr);
+	r8a66597_bclr(r8a66597, ACLRM, ep->pipectr);
+
+	restore_usb_toggle(r8a66597, pipenum, toggle);
+}
+
+static int sudmac_alloc_channel(struct r8a66597 *r8a66597,
+				struct r8a66597_ep *ep,
+				struct r8a66597_request *req)
+{
+	struct r8a66597_dma *dma;
+
+	if (!r8a66597_is_sudmac(r8a66597))
+		return -ENODEV;
+
+	/* Check transfer type */
+	if (!is_bulk_pipe(ep->pipenum))
+		return -EIO;
+
+	if (r8a66597->dma.used)
+		return -EBUSY;
+
+	/* set SUDMAC parameters */
+	dma = &r8a66597->dma;
+	dma->used = 1;
+	if (ep->desc->bEndpointAddress & USB_DIR_IN) {
+		dma->dir = 1;
+	} else {
+		dma->dir = 0;
+		change_bfre_mode(r8a66597, ep->pipenum, 1);
+	}
+
+	/* set r8a66597_ep parameters */
+	ep->use_dma = 1;
+	ep->dma = dma;
+	ep->fifoaddr = D0FIFO;
+	ep->fifosel = D0FIFOSEL;
+	ep->fifoctr = D0FIFOCTR;
+
+	/* dma mapping */
+	return usb_gadget_map_request(&r8a66597->gadget, &req->req, dma->dir);
+}
+
+static void sudmac_free_channel(struct r8a66597 *r8a66597,
+				struct r8a66597_ep *ep,
+				struct r8a66597_request *req)
+{
+	if (!r8a66597_is_sudmac(r8a66597))
+		return;
+
+	usb_gadget_unmap_request(&r8a66597->gadget, &req->req, ep->dma->dir);
+
+	r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
+	r8a66597_change_curpipe(r8a66597, 0, 0, ep->fifosel);
+
+	ep->dma->used = 0;
+	ep->use_dma = 0;
+	ep->fifoaddr = CFIFO;
+	ep->fifosel = CFIFOSEL;
+	ep->fifoctr = CFIFOCTR;
+}
+
+static void sudmac_start(struct r8a66597 *r8a66597, struct r8a66597_ep *ep,
+			 struct r8a66597_request *req)
+{
+	BUG_ON(req->req.length == 0);
+
+	r8a66597_sudmac_write(r8a66597, LBA_WAIT, CH0CFG);
+	r8a66597_sudmac_write(r8a66597, req->req.dma, CH0BA);
+	r8a66597_sudmac_write(r8a66597, req->req.length, CH0BBC);
+	r8a66597_sudmac_write(r8a66597, CH0ENDE, DINTCTRL);
+
+	r8a66597_sudmac_write(r8a66597, DEN, CH0DEN);
+}
+
+static void start_packet_write(struct r8a66597_ep *ep,
+				struct r8a66597_request *req)
+{
+	struct r8a66597 *r8a66597 = ep->r8a66597;
+	u16 tmp;
+
+	pipe_change(r8a66597, ep->pipenum);
+	disable_irq_empty(r8a66597, ep->pipenum);
+	pipe_start(r8a66597, ep->pipenum);
+
+	if (req->req.length == 0) {
+		transfer_complete(ep, req, 0);
+	} else {
+		r8a66597_write(r8a66597, ~(1 << ep->pipenum), BRDYSTS);
+		if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
+			/* PIO mode */
+			pipe_change(r8a66597, ep->pipenum);
+			disable_irq_empty(r8a66597, ep->pipenum);
+			pipe_start(r8a66597, ep->pipenum);
+			tmp = r8a66597_read(r8a66597, ep->fifoctr);
+			if (unlikely((tmp & FRDY) == 0))
+				pipe_irq_enable(r8a66597, ep->pipenum);
+			else
+				irq_packet_write(ep, req);
+		} else {
+			/* DMA mode */
+			pipe_change(r8a66597, ep->pipenum);
+			disable_irq_nrdy(r8a66597, ep->pipenum);
+			pipe_start(r8a66597, ep->pipenum);
+			enable_irq_nrdy(r8a66597, ep->pipenum);
+			sudmac_start(r8a66597, ep, req);
+		}
+	}
+}
+
+static void start_packet_read(struct r8a66597_ep *ep,
+				struct r8a66597_request *req)
+{
+	struct r8a66597 *r8a66597 = ep->r8a66597;
+	u16 pipenum = ep->pipenum;
+
+	if (ep->pipenum == 0) {
+		r8a66597_mdfy(r8a66597, 0, (ISEL | CURPIPE), CFIFOSEL);
+		r8a66597_write(r8a66597, BCLR, ep->fifoctr);
+		pipe_start(r8a66597, pipenum);
+		pipe_irq_enable(r8a66597, pipenum);
+	} else {
+		pipe_stop(r8a66597, pipenum);
+		if (ep->pipetre) {
+			enable_irq_nrdy(r8a66597, pipenum);
+			r8a66597_write(r8a66597, TRCLR, ep->pipetre);
+			r8a66597_write(r8a66597,
+				DIV_ROUND_UP(req->req.length, ep->ep.maxpacket),
+				ep->pipetrn);
+			r8a66597_bset(r8a66597, TRENB, ep->pipetre);
+		}
+
+		if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
+			/* PIO mode */
+			change_bfre_mode(r8a66597, ep->pipenum, 0);
+			pipe_start(r8a66597, pipenum);	/* trigger once */
+			pipe_irq_enable(r8a66597, pipenum);
+		} else {
+			pipe_change(r8a66597, pipenum);
+			sudmac_start(r8a66597, ep, req);
+			pipe_start(r8a66597, pipenum);	/* trigger once */
+		}
+	}
+}
+
+static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
+{
+	if (ep->desc->bEndpointAddress & USB_DIR_IN)
+		start_packet_write(ep, req);
+	else
+		start_packet_read(ep, req);
+}
+
+static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
+{
+	u16 ctsq;
+
+	ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ;
+
+	switch (ctsq) {
+	case CS_RDDS:
+		start_ep0_write(ep, req);
+		break;
+	case CS_WRDS:
+		start_packet_read(ep, req);
+		break;
+
+	case CS_WRND:
+		control_end(ep->r8a66597, 0);
+		break;
+	default:
+		dev_err(r8a66597_to_dev(ep->r8a66597),
+			"start_ep0: unexpected ctsq (%x)\n", ctsq);
+		break;
+	}
+}
+
+static void init_controller(struct r8a66597 *r8a66597)
+{
+	u16 vif = r8a66597->pdata->vif ? LDRV : 0;
+	u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
+	u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
+
+	if (r8a66597->pdata->on_chip) {
+		if (r8a66597->pdata->buswait)
+			r8a66597_write(r8a66597, r8a66597->pdata->buswait,
+					SYSCFG1);
+		else
+			r8a66597_write(r8a66597, 0x0f, SYSCFG1);
+		r8a66597_bset(r8a66597, HSE, SYSCFG0);
+
+		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
+		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
+		r8a66597_bset(r8a66597, USBE, SYSCFG0);
+
+		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
+
+		r8a66597_bset(r8a66597, irq_sense, INTENB1);
+		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
+				DMA0CFG);
+	} else {
+		r8a66597_bset(r8a66597, vif | endian, PINCFG);
+		r8a66597_bset(r8a66597, HSE, SYSCFG0);		/* High spd */
+		r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
+				XTAL, SYSCFG0);
+
+		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
+		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
+		r8a66597_bset(r8a66597, USBE, SYSCFG0);
+
+		r8a66597_bset(r8a66597, XCKE, SYSCFG0);
+
+		msleep(3);
+
+		r8a66597_bset(r8a66597, PLLC, SYSCFG0);
+
+		msleep(1);
+
+		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
+
+		r8a66597_bset(r8a66597, irq_sense, INTENB1);
+		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
+			       DMA0CFG);
+	}
+}
+
+static void disable_controller(struct r8a66597 *r8a66597)
+{
+	if (r8a66597->pdata->on_chip) {
+		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
+		r8a66597_bclr(r8a66597, UTST, TESTMODE);
+
+		/* disable interrupts */
+		r8a66597_write(r8a66597, 0, INTENB0);
+		r8a66597_write(r8a66597, 0, INTENB1);
+		r8a66597_write(r8a66597, 0, BRDYENB);
+		r8a66597_write(r8a66597, 0, BEMPENB);
+		r8a66597_write(r8a66597, 0, NRDYENB);
+
+		/* clear status */
+		r8a66597_write(r8a66597, 0, BRDYSTS);
+		r8a66597_write(r8a66597, 0, NRDYSTS);
+		r8a66597_write(r8a66597, 0, BEMPSTS);
+
+		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
+		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
+
+	} else {
+		r8a66597_bclr(r8a66597, UTST, TESTMODE);
+		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
+		udelay(1);
+		r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
+		udelay(1);
+		udelay(1);
+		r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
+	}
+}
+
+static void r8a66597_start_xclock(struct r8a66597 *r8a66597)
+{
+	u16 tmp;
+
+	if (!r8a66597->pdata->on_chip) {
+		tmp = r8a66597_read(r8a66597, SYSCFG0);
+		if (!(tmp & XCKE))
+			r8a66597_bset(r8a66597, XCKE, SYSCFG0);
+	}
+}
+
+static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep)
+{
+	return list_entry(ep->queue.next, struct r8a66597_request, queue);
+}
+
+/*-------------------------------------------------------------------------*/
+static void transfer_complete(struct r8a66597_ep *ep,
+		struct r8a66597_request *req, int status)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
+	int restart = 0;
+
+	if (unlikely(ep->pipenum == 0)) {
+		if (ep->internal_ccpl) {
+			ep->internal_ccpl = 0;
+			return;
+		}
+	}
+
+	list_del_init(&req->queue);
+	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
+		req->req.status = -ESHUTDOWN;
+	else
+		req->req.status = status;
+
+	if (!list_empty(&ep->queue))
+		restart = 1;
+
+	if (ep->use_dma)
+		sudmac_free_channel(ep->r8a66597, ep, req);
+
+	spin_unlock(&ep->r8a66597->lock);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&ep->r8a66597->lock);
+
+	if (restart) {
+		req = get_request_from_ep(ep);
+		if (ep->desc)
+			start_packet(ep, req);
+	}
+}
+
+static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
+{
+	int i;
+	u16 tmp;
+	unsigned bufsize;
+	size_t size;
+	void *buf;
+	u16 pipenum = ep->pipenum;
+	struct r8a66597 *r8a66597 = ep->r8a66597;
+
+	pipe_change(r8a66597, pipenum);
+	r8a66597_bset(r8a66597, ISEL, ep->fifosel);
+
+	i = 0;
+	do {
+		tmp = r8a66597_read(r8a66597, ep->fifoctr);
+		if (i++ > 100000) {
+			dev_err(r8a66597_to_dev(r8a66597),
+				"pipe0 is busy: possible cpu i/o bus "
+				"conflict. please power off this controller\n");
+			return;
+		}
+		ndelay(1);
+	} while ((tmp & FRDY) == 0);
+
+	/* prepare parameters */
+	bufsize = get_buffer_size(r8a66597, pipenum);
+	buf = req->req.buf + req->req.actual;
+	size = min(bufsize, req->req.length - req->req.actual);
+
+	/* write fifo */
+	if (req->req.buf) {
+		if (size > 0)
+			r8a66597_write_fifo(r8a66597, ep, buf, size);
+		if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
+			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
+	}
+
+	/* update parameters */
+	req->req.actual += size;
+
+	/* check transfer finish */
+	if ((!req->req.zero && (req->req.actual == req->req.length))
+			|| (size % ep->ep.maxpacket)
+			|| (size == 0)) {
+		disable_irq_ready(r8a66597, pipenum);
+		disable_irq_empty(r8a66597, pipenum);
+	} else {
+		disable_irq_ready(r8a66597, pipenum);
+		enable_irq_empty(r8a66597, pipenum);
+	}
+	pipe_start(r8a66597, pipenum);
+}
+
+static void irq_packet_write(struct r8a66597_ep *ep,
+				struct r8a66597_request *req)
+{
+	u16 tmp;
+	unsigned bufsize;
+	size_t size;
+	void *buf;
+	u16 pipenum = ep->pipenum;
+	struct r8a66597 *r8a66597 = ep->r8a66597;
+
+	pipe_change(r8a66597, pipenum);
+	tmp = r8a66597_read(r8a66597, ep->fifoctr);
+	if (unlikely((tmp & FRDY) == 0)) {
+		pipe_stop(r8a66597, pipenum);
+		pipe_irq_disable(r8a66597, pipenum);
+		dev_err(r8a66597_to_dev(r8a66597),
+			"write fifo not ready. pipenum=%d\n", pipenum);
+		return;
+	}
+
+	/* prepare parameters */
+	bufsize = get_buffer_size(r8a66597, pipenum);
+	buf = req->req.buf + req->req.actual;
+	size = min(bufsize, req->req.length - req->req.actual);
+
+	/* write fifo */
+	if (req->req.buf) {
+		r8a66597_write_fifo(r8a66597, ep, buf, size);
+		if ((size == 0)
+				|| ((size % ep->ep.maxpacket) != 0)
+				|| ((bufsize != ep->ep.maxpacket)
+					&& (bufsize > size)))
+			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
+	}
+
+	/* update parameters */
+	req->req.actual += size;
+	/* check transfer finish */
+	if ((!req->req.zero && (req->req.actual == req->req.length))
+			|| (size % ep->ep.maxpacket)
+			|| (size == 0)) {
+		disable_irq_ready(r8a66597, pipenum);
+		enable_irq_empty(r8a66597, pipenum);
+	} else {
+		disable_irq_empty(r8a66597, pipenum);
+		pipe_irq_enable(r8a66597, pipenum);
+	}
+}
+
+static void irq_packet_read(struct r8a66597_ep *ep,
+				struct r8a66597_request *req)
+{
+	u16 tmp;
+	int rcv_len, bufsize, req_len;
+	int size;
+	void *buf;
+	u16 pipenum = ep->pipenum;
+	struct r8a66597 *r8a66597 = ep->r8a66597;
+	int finish = 0;
+
+	pipe_change(r8a66597, pipenum);
+	tmp = r8a66597_read(r8a66597, ep->fifoctr);
+	if (unlikely((tmp & FRDY) == 0)) {
+		req->req.status = -EPIPE;
+		pipe_stop(r8a66597, pipenum);
+		pipe_irq_disable(r8a66597, pipenum);
+		dev_err(r8a66597_to_dev(r8a66597), "read fifo not ready\n");
+		return;
+	}
+
+	/* prepare parameters */
+	rcv_len = tmp & DTLN;
+	bufsize = get_buffer_size(r8a66597, pipenum);
+
+	buf = req->req.buf + req->req.actual;
+	req_len = req->req.length - req->req.actual;
+	if (rcv_len < bufsize)
+		size = min(rcv_len, req_len);
+	else
+		size = min(bufsize, req_len);
+
+	/* update parameters */
+	req->req.actual += size;
+
+	/* check transfer finish */
+	if ((!req->req.zero && (req->req.actual == req->req.length))
+			|| (size % ep->ep.maxpacket)
+			|| (size == 0)) {
+		pipe_stop(r8a66597, pipenum);
+		pipe_irq_disable(r8a66597, pipenum);
+		finish = 1;
+	}
+
+	/* read fifo */
+	if (req->req.buf) {
+		if (size == 0)
+			r8a66597_write(r8a66597, BCLR, ep->fifoctr);
+		else
+			r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size);
+
+	}
+
+	if ((ep->pipenum != 0) && finish)
+		transfer_complete(ep, req, 0);
+}
+
+static void irq_pipe_ready(struct r8a66597 *r8a66597, u16 status, u16 enb)
+{
+	u16 check;
+	u16 pipenum;
+	struct r8a66597_ep *ep;
+	struct r8a66597_request *req;
+
+	if ((status & BRDY0) && (enb & BRDY0)) {
+		r8a66597_write(r8a66597, ~BRDY0, BRDYSTS);
+		r8a66597_mdfy(r8a66597, 0, CURPIPE, CFIFOSEL);
+
+		ep = &r8a66597->ep[0];
+		req = get_request_from_ep(ep);
+		irq_packet_read(ep, req);
+	} else {
+		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+			check = 1 << pipenum;
+			if ((status & check) && (enb & check)) {
+				r8a66597_write(r8a66597, ~check, BRDYSTS);
+				ep = r8a66597->pipenum2ep[pipenum];
+				req = get_request_from_ep(ep);
+				if (ep->desc->bEndpointAddress & USB_DIR_IN)
+					irq_packet_write(ep, req);
+				else
+					irq_packet_read(ep, req);
+			}
+		}
+	}
+}
+
+static void irq_pipe_empty(struct r8a66597 *r8a66597, u16 status, u16 enb)
+{
+	u16 tmp;
+	u16 check;
+	u16 pipenum;
+	struct r8a66597_ep *ep;
+	struct r8a66597_request *req;
+
+	if ((status & BEMP0) && (enb & BEMP0)) {
+		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
+
+		ep = &r8a66597->ep[0];
+		req = get_request_from_ep(ep);
+		irq_ep0_write(ep, req);
+	} else {
+		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+			check = 1 << pipenum;
+			if ((status & check) && (enb & check)) {
+				r8a66597_write(r8a66597, ~check, BEMPSTS);
+				tmp = control_reg_get(r8a66597, pipenum);
+				if ((tmp & INBUFM) == 0) {
+					disable_irq_empty(r8a66597, pipenum);
+					pipe_irq_disable(r8a66597, pipenum);
+					pipe_stop(r8a66597, pipenum);
+					ep = r8a66597->pipenum2ep[pipenum];
+					req = get_request_from_ep(ep);
+					if (!list_empty(&ep->queue))
+						transfer_complete(ep, req, 0);
+				}
+			}
+		}
+	}
+}
+
+static void get_status(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
+	struct r8a66597_ep *ep;
+	u16 pid;
+	u16 status = 0;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		status = 1 << USB_DEVICE_SELF_POWERED;
+		break;
+	case USB_RECIP_INTERFACE:
+		status = 0;
+		break;
+	case USB_RECIP_ENDPOINT:
+		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+		pid = control_reg_get_pid(r8a66597, ep->pipenum);
+		if (pid == PID_STALL)
+			status = 1 << USB_ENDPOINT_HALT;
+		else
+			status = 0;
+		break;
+	default:
+		pipe_stall(r8a66597, 0);
+		return;		/* exit */
+	}
+
+	r8a66597->ep0_data = cpu_to_le16(status);
+	r8a66597->ep0_req->buf = &r8a66597->ep0_data;
+	r8a66597->ep0_req->length = 2;
+	/* AV: what happens if we get called again before that gets through? */
+	spin_unlock(&r8a66597->lock);
+	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
+	spin_lock(&r8a66597->lock);
+}
+
+static void clear_feature(struct r8a66597 *r8a66597,
+				struct usb_ctrlrequest *ctrl)
+{
+	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		control_end(r8a66597, 1);
+		break;
+	case USB_RECIP_INTERFACE:
+		control_end(r8a66597, 1);
+		break;
+	case USB_RECIP_ENDPOINT: {
+		struct r8a66597_ep *ep;
+		struct r8a66597_request *req;
+		u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+		if (!ep->wedge) {
+			pipe_stop(r8a66597, ep->pipenum);
+			control_reg_sqclr(r8a66597, ep->pipenum);
+			spin_unlock(&r8a66597->lock);
+			usb_ep_clear_halt(&ep->ep);
+			spin_lock(&r8a66597->lock);
+		}
+
+		control_end(r8a66597, 1);
+
+		req = get_request_from_ep(ep);
+		if (ep->busy) {
+			ep->busy = 0;
+			if (list_empty(&ep->queue))
+				break;
+			start_packet(ep, req);
+		} else if (!list_empty(&ep->queue))
+			pipe_start(r8a66597, ep->pipenum);
+		}
+		break;
+	default:
+		pipe_stall(r8a66597, 0);
+		break;
+	}
+}
+
+static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
+{
+	u16 tmp;
+	int timeout = 3000;
+
+	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		switch (le16_to_cpu(ctrl->wValue)) {
+		case USB_DEVICE_TEST_MODE:
+			control_end(r8a66597, 1);
+			/* Wait for the completion of status stage */
+			do {
+				tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
+				udelay(1);
+			} while (tmp != CS_IDST && timeout-- > 0);
+
+			if (tmp == CS_IDST)
+				r8a66597_bset(r8a66597,
+					      le16_to_cpu(ctrl->wIndex) >> 8,
+					      TESTMODE);
+			break;
+		default:
+			pipe_stall(r8a66597, 0);
+			break;
+		}
+		break;
+	case USB_RECIP_INTERFACE:
+		control_end(r8a66597, 1);
+		break;
+	case USB_RECIP_ENDPOINT: {
+		struct r8a66597_ep *ep;
+		u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+		pipe_stall(r8a66597, ep->pipenum);
+
+		control_end(r8a66597, 1);
+		}
+		break;
+	default:
+		pipe_stall(r8a66597, 0);
+		break;
+	}
+}
+
+/* if the return value is nonzero, call the class driver's setup() */
+static int setup_packet(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
+{
+	u16 *p = (u16 *)ctrl;
+	unsigned long offset = USBREQ;
+	int i, ret = 0;
+
+	/* read fifo */
+	r8a66597_write(r8a66597, ~VALID, INTSTS0);
+
+	for (i = 0; i < 4; i++)
+		p[i] = r8a66597_read(r8a66597, offset + i*2);
+
+	/* check request */
+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+		switch (ctrl->bRequest) {
+		case USB_REQ_GET_STATUS:
+			get_status(r8a66597, ctrl);
+			break;
+		case USB_REQ_CLEAR_FEATURE:
+			clear_feature(r8a66597, ctrl);
+			break;
+		case USB_REQ_SET_FEATURE:
+			set_feature(r8a66597, ctrl);
+			break;
+		default:
+			ret = 1;
+			break;
+		}
+	} else
+		ret = 1;
+	return ret;
+}
+
+static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
+{
+	u16 speed = get_usb_speed(r8a66597);
+
+	switch (speed) {
+	case HSMODE:
+		r8a66597->gadget.speed = USB_SPEED_HIGH;
+		break;
+	case FSMODE:
+		r8a66597->gadget.speed = USB_SPEED_FULL;
+		break;
+	default:
+		r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
+		dev_err(r8a66597_to_dev(r8a66597), "USB speed unknown\n");
+	}
+}
+
+static void irq_device_state(struct r8a66597 *r8a66597)
+{
+	u16 dvsq;
+
+	dvsq = r8a66597_read(r8a66597, INTSTS0) & DVSQ;
+	r8a66597_write(r8a66597, ~DVST, INTSTS0);
+
+	if (dvsq == DS_DFLT) {
+		/* bus reset */
+		spin_unlock(&r8a66597->lock);
+		r8a66597->driver->disconnect(&r8a66597->gadget);
+		spin_lock(&r8a66597->lock);
+		r8a66597_update_usb_speed(r8a66597);
+	}
+	if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
+		r8a66597_update_usb_speed(r8a66597);
+	if ((dvsq == DS_CNFG || dvsq == DS_ADDS)
+			&& r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
+		r8a66597_update_usb_speed(r8a66597);
+
+	r8a66597->old_dvsq = dvsq;
+}
+
+static void irq_control_stage(struct r8a66597 *r8a66597)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
+	struct usb_ctrlrequest ctrl;
+	u16 ctsq;
+
+	ctsq = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
+	r8a66597_write(r8a66597, ~CTRT, INTSTS0);
+
+	switch (ctsq) {
+	case CS_IDST: {
+		struct r8a66597_ep *ep;
+		struct r8a66597_request *req;
+		ep = &r8a66597->ep[0];
+		req = get_request_from_ep(ep);
+		transfer_complete(ep, req, 0);
+		}
+		break;
+
+	case CS_RDDS:
+	case CS_WRDS:
+	case CS_WRND:
+		if (setup_packet(r8a66597, &ctrl)) {
+			spin_unlock(&r8a66597->lock);
+			if (r8a66597->driver->setup(&r8a66597->gadget, &ctrl)
+				< 0)
+				pipe_stall(r8a66597, 0);
+			spin_lock(&r8a66597->lock);
+		}
+		break;
+	case CS_RDSS:
+	case CS_WRSS:
+		control_end(r8a66597, 0);
+		break;
+	default:
+		dev_err(r8a66597_to_dev(r8a66597),
+			"ctrl_stage: unexpected ctsq (%x)\n", ctsq);
+		break;
+	}
+}
+
+static void sudmac_finish(struct r8a66597 *r8a66597, struct r8a66597_ep *ep)
+{
+	u16 pipenum;
+	struct r8a66597_request *req;
+	u32 len;
+	int i = 0;
+
+	pipenum = ep->pipenum;
+	pipe_change(r8a66597, pipenum);
+
+	while (!(r8a66597_read(r8a66597, ep->fifoctr) & FRDY)) {
+		udelay(1);
+		if (unlikely(i++ >= 10000)) {	/* timeout = 10 msec */
+			dev_err(r8a66597_to_dev(r8a66597),
+				"%s: FRDY was not set (%d)\n",
+				__func__, pipenum);
+			return;
+		}
+	}
+
+	r8a66597_bset(r8a66597, BCLR, ep->fifoctr);
+	req = get_request_from_ep(ep);
+
+	/* prepare parameters */
+	len = r8a66597_sudmac_read(r8a66597, CH0CBC);
+	req->req.actual += len;
+
+	/* clear */
+	r8a66597_sudmac_write(r8a66597, CH0STCLR, DSTSCLR);
+
+	/* check transfer finish */
+	if ((!req->req.zero && (req->req.actual == req->req.length))
+			|| (len % ep->ep.maxpacket)) {
+		if (ep->dma->dir) {
+			disable_irq_ready(r8a66597, pipenum);
+			enable_irq_empty(r8a66597, pipenum);
+		} else {
+			/* Clear the interrupt flag for next transfer */
+			r8a66597_write(r8a66597, ~(1 << pipenum), BRDYSTS);
+			transfer_complete(ep, req, 0);
+		}
+	}
+}
+
+static void r8a66597_sudmac_irq(struct r8a66597 *r8a66597)
+{
+	u32 irqsts;
+	struct r8a66597_ep *ep;
+	u16 pipenum;
+
+	irqsts = r8a66597_sudmac_read(r8a66597, DINTSTS);
+	if (irqsts & CH0ENDS) {
+		r8a66597_sudmac_write(r8a66597, CH0ENDC, DINTSTSCLR);
+		pipenum = (r8a66597_read(r8a66597, D0FIFOSEL) & CURPIPE);
+		ep = r8a66597->pipenum2ep[pipenum];
+		sudmac_finish(r8a66597, ep);
+	}
+}
+
+static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
+{
+	struct r8a66597 *r8a66597 = _r8a66597;
+	u16 intsts0;
+	u16 intenb0;
+	u16 brdysts, nrdysts, bempsts;
+	u16 brdyenb, nrdyenb, bempenb;
+	u16 savepipe;
+	u16 mask0;
+
+	if (r8a66597_is_sudmac(r8a66597))
+		r8a66597_sudmac_irq(r8a66597);
+
+	spin_lock(&r8a66597->lock);
+
+	intsts0 = r8a66597_read(r8a66597, INTSTS0);
+	intenb0 = r8a66597_read(r8a66597, INTENB0);
+
+	savepipe = r8a66597_read(r8a66597, CFIFOSEL);
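+	/*
+	 * CFIFOSEL is saved here and restored at the end of the handler so
+	 * that interrupt processing does not disturb the FIFO pipe that was
+	 * selected before the interrupt.
+	 */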
+
+	mask0 = intsts0 & intenb0;
+	if (mask0) {
+		brdysts = r8a66597_read(r8a66597, BRDYSTS);
+		nrdysts = r8a66597_read(r8a66597, NRDYSTS);
+		bempsts = r8a66597_read(r8a66597, BEMPSTS);
+		brdyenb = r8a66597_read(r8a66597, BRDYENB);
+		nrdyenb = r8a66597_read(r8a66597, NRDYENB);
+		bempenb = r8a66597_read(r8a66597, BEMPENB);
+
+		if (mask0 & VBINT) {
+			r8a66597_write(r8a66597,  0xffff & ~VBINT,
+					INTSTS0);
+			r8a66597_start_xclock(r8a66597);
+
+			/* start vbus sampling */
+			r8a66597->old_vbus = r8a66597_read(r8a66597, INTSTS0)
+					& VBSTS;
+			r8a66597->scount = R8A66597_MAX_SAMPLING;
+
+			mod_timer(&r8a66597->timer,
+					jiffies + msecs_to_jiffies(50));
+		}
+		if (intsts0 & DVSQ)
+			irq_device_state(r8a66597);
+
+		if ((intsts0 & BRDY) && (intenb0 & BRDYE)
+				&& (brdysts & brdyenb))
+			irq_pipe_ready(r8a66597, brdysts, brdyenb);
+		if ((intsts0 & BEMP) && (intenb0 & BEMPE)
+				&& (bempsts & bempenb))
+			irq_pipe_empty(r8a66597, bempsts, bempenb);
+
+		if (intsts0 & CTRT)
+			irq_control_stage(r8a66597);
+	}
+
+	r8a66597_write(r8a66597, savepipe, CFIFOSEL);
+
+	spin_unlock(&r8a66597->lock);
+	return IRQ_HANDLED;
+}
+
+static void r8a66597_timer(unsigned long _r8a66597)
+{
+	struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+	unsigned long flags;
+	u16 tmp;
+
+	spin_lock_irqsave(&r8a66597->lock, flags);
+	tmp = r8a66597_read(r8a66597, SYSCFG0);
+	if (r8a66597->scount > 0) {
+		tmp = r8a66597_read(r8a66597, INTSTS0) & VBSTS;
+		if (tmp == r8a66597->old_vbus) {
+			r8a66597->scount--;
+			if (r8a66597->scount == 0) {
+				if (tmp == VBSTS)
+					r8a66597_usb_connect(r8a66597);
+				else
+					r8a66597_usb_disconnect(r8a66597);
+			} else {
+				mod_timer(&r8a66597->timer,
+					jiffies + msecs_to_jiffies(50));
+			}
+		} else {
+			r8a66597->scount = R8A66597_MAX_SAMPLING;
+			r8a66597->old_vbus = tmp;
+			mod_timer(&r8a66597->timer,
+					jiffies + msecs_to_jiffies(50));
+		}
+	}
+	spin_unlock_irqrestore(&r8a66597->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+static int r8a66597_enable(struct usb_ep *_ep,
+			 const struct usb_endpoint_descriptor *desc)
+{
+	struct r8a66597_ep *ep;
+
+	ep = container_of(_ep, struct r8a66597_ep, ep);
+	return alloc_pipe_config(ep, desc);
+}
+
+static int r8a66597_disable(struct usb_ep *_ep)
+{
+	struct r8a66597_ep *ep;
+	struct r8a66597_request *req;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct r8a66597_ep, ep);
+	BUG_ON(!ep);
+
+	while (!list_empty(&ep->queue)) {
+		req = get_request_from_ep(ep);
+		spin_lock_irqsave(&ep->r8a66597->lock, flags);
+		transfer_complete(ep, req, -ECONNRESET);
+		spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
+	}
+
+	pipe_irq_disable(ep->r8a66597, ep->pipenum);
+	return free_pipe_config(ep);
+}
+
+static struct usb_request *r8a66597_alloc_request(struct usb_ep *_ep,
+						gfp_t gfp_flags)
+{
+	struct r8a66597_request *req;
+
+	req = kzalloc(sizeof(struct r8a66597_request), gfp_flags);
+	if (!req)
+		return NULL;
+
+	INIT_LIST_HEAD(&req->queue);
+
+	return &req->req;
+}
+
+static void r8a66597_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct r8a66597_request *req;
+
+	req = container_of(_req, struct r8a66597_request, req);
+	kfree(req);
+}
+
+static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
+			gfp_t gfp_flags)
+{
+	struct r8a66597_ep *ep;
+	struct r8a66597_request *req;
+	unsigned long flags;
+	int request = 0;
+
+	ep = container_of(_ep, struct r8a66597_ep, ep);
+	req = container_of(_req, struct r8a66597_request, req);
+
+	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	spin_lock_irqsave(&ep->r8a66597->lock, flags);
+
+	if (list_empty(&ep->queue))
+		request = 1;
+
+	list_add_tail(&req->queue, &ep->queue);
+	req->req.actual = 0;
+	req->req.status = -EINPROGRESS;
+
+	if (ep->desc == NULL)	/* control */
+		start_ep0(ep, req);
+	else {
+		if (request && !ep->busy)
+			start_packet(ep, req);
+	}
+
+	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
+
+	return 0;
+}
+
+static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct r8a66597_ep *ep;
+	struct r8a66597_request *req;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct r8a66597_ep, ep);
+	req = container_of(_req, struct r8a66597_request, req);
+
+	spin_lock_irqsave(&ep->r8a66597->lock, flags);
+	if (!list_empty(&ep->queue))
+		transfer_complete(ep, req, -ECONNRESET);
+	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
+
+	return 0;
+}
+
+static int r8a66597_set_halt(struct usb_ep *_ep, int value)
+{
+	struct r8a66597_ep *ep;
+	struct r8a66597_request *req;
+	unsigned long flags;
+	int ret = 0;
+
+	ep = container_of(_ep, struct r8a66597_ep, ep);
+	req = get_request_from_ep(ep);
+
+	spin_lock_irqsave(&ep->r8a66597->lock, flags);
+	if (!list_empty(&ep->queue)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	if (value) {
+		ep->busy = 1;
+		pipe_stall(ep->r8a66597, ep->pipenum);
+	} else {
+		ep->busy = 0;
+		ep->wedge = 0;
+		pipe_stop(ep->r8a66597, ep->pipenum);
+	}
+
+out:
+	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
+	return ret;
+}
+
+static int r8a66597_set_wedge(struct usb_ep *_ep)
+{
+	struct r8a66597_ep *ep;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct r8a66597_ep, ep);
+
+	if (!ep || !ep->desc)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->r8a66597->lock, flags);
+	ep->wedge = 1;
+	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
+
+	return usb_ep_set_halt(_ep);
+}
+
+static void r8a66597_fifo_flush(struct usb_ep *_ep)
+{
+	struct r8a66597_ep *ep;
+	unsigned long flags;
+
+	ep = container_of(_ep, struct r8a66597_ep, ep);
+	spin_lock_irqsave(&ep->r8a66597->lock, flags);
+	if (list_empty(&ep->queue) && !ep->busy) {
+		pipe_stop(ep->r8a66597, ep->pipenum);
+		r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
+		r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
+		r8a66597_write(ep->r8a66597, 0, ep->pipectr);
+	}
+	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
+}
+
+static struct usb_ep_ops r8a66597_ep_ops = {
+	.enable		= r8a66597_enable,
+	.disable	= r8a66597_disable,
+
+	.alloc_request	= r8a66597_alloc_request,
+	.free_request	= r8a66597_free_request,
+
+	.queue		= r8a66597_queue,
+	.dequeue	= r8a66597_dequeue,
+
+	.set_halt	= r8a66597_set_halt,
+	.set_wedge	= r8a66597_set_wedge,
+	.fifo_flush	= r8a66597_fifo_flush,
+};
+
+/*-------------------------------------------------------------------------*/
+static int r8a66597_start(struct usb_gadget *gadget,
+		struct usb_gadget_driver *driver)
+{
+	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
+
+	if (!driver
+			|| driver->max_speed < USB_SPEED_HIGH
+			|| !driver->setup)
+		return -EINVAL;
+	if (!r8a66597)
+		return -ENODEV;
+
+	/* hook up the driver */
+	r8a66597->driver = driver;
+
+	init_controller(r8a66597);
+	r8a66597_bset(r8a66597, VBSE, INTENB0);
+	if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
+		r8a66597_start_xclock(r8a66597);
+		/* start vbus sampling */
+		r8a66597->old_vbus = r8a66597_read(r8a66597,
+					 INTSTS0) & VBSTS;
+		r8a66597->scount = R8A66597_MAX_SAMPLING;
+		mod_timer(&r8a66597->timer, jiffies + msecs_to_jiffies(50));
+	}
+
+	return 0;
+}
+
+static int r8a66597_stop(struct usb_gadget *gadget,
+		struct usb_gadget_driver *driver)
+{
+	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
+	unsigned long flags;
+
+	spin_lock_irqsave(&r8a66597->lock, flags);
+	r8a66597_bclr(r8a66597, VBSE, INTENB0);
+	disable_controller(r8a66597);
+	spin_unlock_irqrestore(&r8a66597->lock, flags);
+
+	r8a66597->driver = NULL;
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+static int r8a66597_get_frame(struct usb_gadget *_gadget)
+{
+	struct r8a66597 *r8a66597 = gadget_to_r8a66597(_gadget);
+	return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
+}
+
+static int r8a66597_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
+	unsigned long flags;
+
+	spin_lock_irqsave(&r8a66597->lock, flags);
+	if (is_on)
+		r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
+	else
+		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
+	spin_unlock_irqrestore(&r8a66597->lock, flags);
+
+	return 0;
+}
+
+static struct usb_gadget_ops r8a66597_gadget_ops = {
+	.get_frame		= r8a66597_get_frame,
+	.udc_start		= r8a66597_start,
+	.udc_stop		= r8a66597_stop,
+	.pullup			= r8a66597_pullup,
+};
+
+static int __exit r8a66597_remove(struct platform_device *pdev)
+{
+	struct r8a66597		*r8a66597 = dev_get_drvdata(&pdev->dev);
+
+	usb_del_gadget_udc(&r8a66597->gadget);
+	del_timer_sync(&r8a66597->timer);
+	iounmap(r8a66597->reg);
+	if (r8a66597->pdata->sudmac)
+		iounmap(r8a66597->sudmac_reg);
+	free_irq(platform_get_irq(pdev, 0), r8a66597);
+	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
+#ifdef CONFIG_HAVE_CLK
+	if (r8a66597->pdata->on_chip) {
+		clk_disable(r8a66597->clk);
+		clk_put(r8a66597->clk);
+	}
+#endif
+	device_unregister(&r8a66597->gadget.dev);
+	kfree(r8a66597);
+	return 0;
+}
+
+static void nop_completion(struct usb_ep *ep, struct usb_request *r)
+{
+}
+
+static int __init r8a66597_sudmac_ioremap(struct r8a66597 *r8a66597,
+					  struct platform_device *pdev)
+{
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sudmac");
+	if (!res) {
+		dev_err(&pdev->dev, "platform_get_resource error(sudmac).\n");
+		return -ENODEV;
+	}
+
+	r8a66597->sudmac_reg = ioremap(res->start, resource_size(res));
+	if (r8a66597->sudmac_reg == NULL) {
+		dev_err(&pdev->dev, "ioremap error(sudmac).\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int __init r8a66597_probe(struct platform_device *pdev)
+{
+#ifdef CONFIG_HAVE_CLK
+	char clk_name[8];
+#endif
+	struct resource *res, *ires;
+	int irq;
+	void __iomem *reg = NULL;
+	struct r8a66597 *r8a66597 = NULL;
+	int ret = 0;
+	int i;
+	unsigned long irq_trigger;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev, "platform_get_resource error.\n");
+		goto clean_up;
+	}
+
+	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!ires) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev, "platform_get_resource IRQ error.\n");
+		goto clean_up;
+	}
+	irq = ires->start;
+	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
+
+	if (irq < 0) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev, "platform_get_irq error.\n");
+		goto clean_up;
+	}
+
+	reg = ioremap(res->start, resource_size(res));
+	if (reg == NULL) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "ioremap error.\n");
+		goto clean_up;
+	}
+
+	/* initialize ucd */
+	r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
+	if (r8a66597 == NULL) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "kzalloc error\n");
+		goto clean_up;
+	}
+
+	spin_lock_init(&r8a66597->lock);
+	dev_set_drvdata(&pdev->dev, r8a66597);
+	r8a66597->pdata = pdev->dev.platform_data;
+	r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
+
+	r8a66597->gadget.ops = &r8a66597_gadget_ops;
+	dev_set_name(&r8a66597->gadget.dev, "gadget");
+	r8a66597->gadget.max_speed = USB_SPEED_HIGH;
+	r8a66597->gadget.dev.parent = &pdev->dev;
+	r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
+	r8a66597->gadget.dev.release = pdev->dev.release;
+	r8a66597->gadget.name = udc_name;
+	ret = device_register(&r8a66597->gadget.dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "device_register failed\n");
+		goto clean_up;
+	}
+
+	init_timer(&r8a66597->timer);
+	r8a66597->timer.function = r8a66597_timer;
+	r8a66597->timer.data = (unsigned long)r8a66597;
+	r8a66597->reg = reg;
+
+#ifdef CONFIG_HAVE_CLK
+	if (r8a66597->pdata->on_chip) {
+		snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
+		r8a66597->clk = clk_get(&pdev->dev, clk_name);
+		if (IS_ERR(r8a66597->clk)) {
+			dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
+				clk_name);
+			ret = PTR_ERR(r8a66597->clk);
+			goto clean_up_dev;
+		}
+		clk_enable(r8a66597->clk);
+	}
+#endif
+	if (r8a66597->pdata->sudmac) {
+		ret = r8a66597_sudmac_ioremap(r8a66597, pdev);
+		if (ret < 0)
+			goto clean_up2;
+	}
+
+	disable_controller(r8a66597); /* make sure controller is disabled */
+
+	ret = request_irq(irq, r8a66597_irq, IRQF_SHARED,
+			udc_name, r8a66597);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "request_irq error (%d)\n", ret);
+		goto clean_up2;
+	}
+
+	INIT_LIST_HEAD(&r8a66597->gadget.ep_list);
+	r8a66597->gadget.ep0 = &r8a66597->ep[0].ep;
+	INIT_LIST_HEAD(&r8a66597->gadget.ep0->ep_list);
+	for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
+		struct r8a66597_ep *ep = &r8a66597->ep[i];
+
+		if (i != 0) {
+			INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list);
+			list_add_tail(&r8a66597->ep[i].ep.ep_list,
+					&r8a66597->gadget.ep_list);
+		}
+		ep->r8a66597 = r8a66597;
+		INIT_LIST_HEAD(&ep->queue);
+		ep->ep.name = r8a66597_ep_name[i];
+		ep->ep.ops = &r8a66597_ep_ops;
+		ep->ep.maxpacket = 512;
+	}
+	r8a66597->ep[0].ep.maxpacket = 64;
+	r8a66597->ep[0].pipenum = 0;
+	r8a66597->ep[0].fifoaddr = CFIFO;
+	r8a66597->ep[0].fifosel = CFIFOSEL;
+	r8a66597->ep[0].fifoctr = CFIFOCTR;
+	r8a66597->ep[0].pipectr = get_pipectr_addr(0);
+	r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
+	r8a66597->epaddr2ep[0] = &r8a66597->ep[0];
+
+	r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
+							GFP_KERNEL);
+	if (r8a66597->ep0_req == NULL) {
+		ret = -ENOMEM;
+		goto clean_up3;
+	}
+	r8a66597->ep0_req->complete = nop_completion;
+
+	ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget);
+	if (ret)
+		goto err_add_udc;
+
+	dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
+	return 0;
+
+err_add_udc:
+	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
+clean_up3:
+	free_irq(irq, r8a66597);
+clean_up2:
+#ifdef CONFIG_HAVE_CLK
+	if (r8a66597->pdata->on_chip) {
+		clk_disable(r8a66597->clk);
+		clk_put(r8a66597->clk);
+	}
+clean_up_dev:
+#endif
+	device_unregister(&r8a66597->gadget.dev);
+clean_up:
+	if (r8a66597) {
+		if (r8a66597->sudmac_reg)
+			iounmap(r8a66597->sudmac_reg);
+		if (r8a66597->ep0_req)
+			r8a66597_free_request(&r8a66597->ep[0].ep,
+						r8a66597->ep0_req);
+		kfree(r8a66597);
+	}
+	if (reg)
+		iounmap(reg);
+
+	return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+static struct platform_driver r8a66597_driver = {
+	.remove =	__exit_p(r8a66597_remove),
+	.driver		= {
+		.name =	(char *) udc_name,
+	},
+};
+MODULE_ALIAS("platform:r8a66597_udc");
+
+static int __init r8a66597_udc_init(void)
+{
+	return platform_driver_probe(&r8a66597_driver, r8a66597_probe);
+}
+module_init(r8a66597_udc_init);
+
+static void __exit r8a66597_udc_cleanup(void)
+{
+	platform_driver_unregister(&r8a66597_driver);
+}
+module_exit(r8a66597_udc_cleanup);
+
+MODULE_DESCRIPTION("R8A66597 USB gadget driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/r8a66597-udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/r8a66597-udc.h
new file mode 100644
index 0000000..8e3de61
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/r8a66597-udc.h
@@ -0,0 +1,294 @@
+/*
+ * R8A66597 UDC
+ *
+ * Copyright (C) 2007-2009 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef __R8A66597_H__
+#define __R8A66597_H__
+
+#ifdef CONFIG_HAVE_CLK
+#include <linux/clk.h>
+#endif
+
+#include <linux/usb/r8a66597.h>
+
+#define R8A66597_MAX_SAMPLING	10
+
+#define R8A66597_MAX_NUM_PIPE	8
+#define R8A66597_MAX_NUM_BULK	3
+#define R8A66597_MAX_NUM_ISOC	2
+#define R8A66597_MAX_NUM_INT	2
+
+#define R8A66597_BASE_PIPENUM_BULK	3
+#define R8A66597_BASE_PIPENUM_ISOC	1
+#define R8A66597_BASE_PIPENUM_INT	6
+
+#define R8A66597_BASE_BUFNUM	6
+#define R8A66597_MAX_BUFNUM	0x4F
+
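+/*
+ * Pipe layout implied by the limits above: pipe 0 is the control pipe
+ * (DCP), isochronous pipes occupy 1-2, bulk pipes 3-5 and interrupt
+ * pipes 6-7, for R8A66597_MAX_NUM_PIPE = 8 pipes in total.
+ */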
+#define is_bulk_pipe(pipenum)	\
+	((pipenum >= R8A66597_BASE_PIPENUM_BULK) && \
+	 (pipenum < (R8A66597_BASE_PIPENUM_BULK + R8A66597_MAX_NUM_BULK)))
+#define is_interrupt_pipe(pipenum)	\
+	((pipenum >= R8A66597_BASE_PIPENUM_INT) && \
+	 (pipenum < (R8A66597_BASE_PIPENUM_INT + R8A66597_MAX_NUM_INT)))
+#define is_isoc_pipe(pipenum)	\
+	((pipenum >= R8A66597_BASE_PIPENUM_ISOC) && \
+	 (pipenum < (R8A66597_BASE_PIPENUM_ISOC + R8A66597_MAX_NUM_ISOC)))
+
+#define r8a66597_is_sudmac(r8a66597)	(r8a66597->pdata->sudmac)
+struct r8a66597_pipe_info {
+	u16	pipe;
+	u16	epnum;
+	u16	maxpacket;
+	u16	type;
+	u16	interval;
+	u16	dir_in;
+};
+
+struct r8a66597_request {
+	struct usb_request	req;
+	struct list_head	queue;
+};
+
+struct r8a66597_ep {
+	struct usb_ep		ep;
+	struct r8a66597		*r8a66597;
+	struct r8a66597_dma	*dma;
+
+	struct list_head	queue;
+	unsigned		busy:1;
+	unsigned		wedge:1;
+	unsigned		internal_ccpl:1;	/* use only control */
+
+	/* this member is valid only after r8a66597_enable */
+	unsigned		use_dma:1;
+	u16			pipenum;
+	u16			type;
+	const struct usb_endpoint_descriptor	*desc;
+	/* register address */
+	unsigned char		fifoaddr;
+	unsigned char		fifosel;
+	unsigned char		fifoctr;
+	unsigned char		pipectr;
+	unsigned char		pipetre;
+	unsigned char		pipetrn;
+};
+
+struct r8a66597_dma {
+	unsigned		used:1;
+	unsigned		dir:1;	/* 1 = IN(write), 0 = OUT(read) */
+};
+
+struct r8a66597 {
+	spinlock_t		lock;
+	void __iomem		*reg;
+	void __iomem		*sudmac_reg;
+
+#ifdef CONFIG_HAVE_CLK
+	struct clk *clk;
+#endif
+	struct r8a66597_platdata	*pdata;
+
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+
+	struct r8a66597_ep	ep[R8A66597_MAX_NUM_PIPE];
+	struct r8a66597_ep	*pipenum2ep[R8A66597_MAX_NUM_PIPE];
+	struct r8a66597_ep	*epaddr2ep[16];
+	struct r8a66597_dma	dma;
+
+	struct timer_list	timer;
+	struct usb_request	*ep0_req;	/* for internal request */
+	u16			ep0_data;	/* for internal request */
+	u16			old_vbus;
+	u16			scount;
+	u16			old_dvsq;
+
+	/* pipe config */
+	unsigned char bulk;
+	unsigned char interrupt;
+	unsigned char isochronous;
+	unsigned char num_dma;
+
+	unsigned irq_sense_low:1;
+};
+
+#define gadget_to_r8a66597(_gadget)	\
+		container_of(_gadget, struct r8a66597, gadget)
+#define r8a66597_to_gadget(r8a66597) (&r8a66597->gadget)
+#define r8a66597_to_dev(r8a66597)	(r8a66597->gadget.dev.parent)
+
+static inline u16 r8a66597_read(struct r8a66597 *r8a66597, unsigned long offset)
+{
+	return ioread16(r8a66597->reg + offset);
+}
+
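+/*
+ * FIFO access width: on-chip controllers use 32-bit FIFO ports, external
+ * controllers 16-bit ones.  The aligned part of the buffer is moved with
+ * ioread32_rep()/ioread16_rep(); any unaligned remainder is unpacked
+ * byte by byte from the last latched word.
+ */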
+static inline void r8a66597_read_fifo(struct r8a66597 *r8a66597,
+				      unsigned long offset,
+				      unsigned char *buf,
+				      int len)
+{
+	void __iomem *fifoaddr = r8a66597->reg + offset;
+	unsigned int data = 0;
+	int i;
+
+	if (r8a66597->pdata->on_chip) {
+		/* 32-bit accesses for on_chip controllers */
+
+		/* aligned buf case */
+		if (len >= 4 && !((unsigned long)buf & 0x03)) {
+			ioread32_rep(fifoaddr, buf, len / 4);
+			buf += len & ~0x03;
+			len &= 0x03;
+		}
+
+		/* unaligned buf case */
+		for (i = 0; i < len; i++) {
+			if (!(i & 0x03))
+				data = ioread32(fifoaddr);
+
+			buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
+		}
+	} else {
+		/* 16-bit accesses for external controllers */
+
+		/* aligned buf case */
+		if (len >= 2 && !((unsigned long)buf & 0x01)) {
+			ioread16_rep(fifoaddr, buf, len / 2);
+			buf += len & ~0x01;
+			len &= 0x01;
+		}
+
+		/* unaligned buf case */
+		for (i = 0; i < len; i++) {
+			if (!(i & 0x01))
+				data = ioread16(fifoaddr);
+
+			buf[i] = (data >> ((i & 0x01) * 8)) & 0xff;
+		}
+	}
+}
+
+static inline void r8a66597_write(struct r8a66597 *r8a66597, u16 val,
+				  unsigned long offset)
+{
+	iowrite16(val, r8a66597->reg + offset);
+}
+
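+/*
+ * Read-modify-write helper: clears the bits in 'pat', then ORs in 'val'.
+ * The r8a66597_bclr()/r8a66597_bset() macros below build on this.
+ */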
+static inline void r8a66597_mdfy(struct r8a66597 *r8a66597,
+				 u16 val, u16 pat, unsigned long offset)
+{
+	u16 tmp;
+	tmp = r8a66597_read(r8a66597, offset);
+	tmp = tmp & (~pat);
+	tmp = tmp | val;
+	r8a66597_write(r8a66597, tmp, offset);
+}
+
+#define r8a66597_bclr(r8a66597, val, offset)	\
+			r8a66597_mdfy(r8a66597, 0, val, offset)
+#define r8a66597_bset(r8a66597, val, offset)	\
+			r8a66597_mdfy(r8a66597, val, 0, offset)
+
+static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597,
+				       struct r8a66597_ep *ep,
+				       unsigned char *buf,
+				       int len)
+{
+	void __iomem *fifoaddr = r8a66597->reg + ep->fifoaddr;
+	int adj = 0;
+	int i;
+
+	if (r8a66597->pdata->on_chip) {
+		/* 32-bit access only if buf is 32-bit aligned */
+		if (len >= 4 && !((unsigned long)buf & 0x03)) {
+			iowrite32_rep(fifoaddr, buf, len / 4);
+			buf += len & ~0x03;
+			len &= 0x03;
+		}
+	} else {
+		/* 16-bit access only if buf is 16-bit aligned */
+		if (len >= 2 && !((unsigned long)buf & 0x01)) {
+			iowrite16_rep(fifoaddr, buf, len / 2);
+			buf += len & ~0x01;
+			len &= 0x01;
+		}
+	}
+
+	/* adjust fifo address in the little endian case */
+	if (!(r8a66597_read(r8a66597, CFIFOSEL) & BIGEND)) {
+		if (r8a66597->pdata->on_chip)
+			adj = 0x03; /* 32-bit wide */
+		else
+			adj = 0x01; /* 16-bit wide */
+	}
+
+	if (r8a66597->pdata->wr0_shorted_to_wr1)
+		r8a66597_bclr(r8a66597, MBW_16, ep->fifosel);
+	for (i = 0; i < len; i++)
+		iowrite8(buf[i], fifoaddr + adj - (i & adj));
+	if (r8a66597->pdata->wr0_shorted_to_wr1)
+		r8a66597_bset(r8a66597, MBW_16, ep->fifosel); /* restore 16-bit access width */
+}
+
+static inline u16 get_xtal_from_pdata(struct r8a66597_platdata *pdata)
+{
+	u16 clock = 0;
+
+	switch (pdata->xtal) {
+	case R8A66597_PLATDATA_XTAL_12MHZ:
+		clock = XTAL12;
+		break;
+	case R8A66597_PLATDATA_XTAL_24MHZ:
+		clock = XTAL24;
+		break;
+	case R8A66597_PLATDATA_XTAL_48MHZ:
+		clock = XTAL48;
+		break;
+	default:
+		printk(KERN_ERR "r8a66597: platdata clock is wrong.\n");
+		break;
+	}
+
+	return clock;
+}
+
+static inline u32 r8a66597_sudmac_read(struct r8a66597 *r8a66597,
+				       unsigned long offset)
+{
+	return ioread32(r8a66597->sudmac_reg + offset);
+}
+
+static inline void r8a66597_sudmac_write(struct r8a66597 *r8a66597, u32 val,
+					 unsigned long offset)
+{
+	iowrite32(val, r8a66597->sudmac_reg + offset);
+}
+
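+/*
+ * Per-pipe register addressing: the 16-bit PIPEnCTR registers are laid
+ * out contiguously from PIPE1CTR (2-byte stride), while the PIPEnTRE /
+ * PIPEnTRN pairs use a 4-byte stride from PIPE1TRE / PIPE1TRN.
+ * E.g. get_pipectr_addr(3) == PIPE1CTR + 4.
+ */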
+#define get_pipectr_addr(pipenum)	(PIPE1CTR + (pipenum - 1) * 2)
+#define get_pipetre_addr(pipenum)	(PIPE1TRE + (pipenum - 1) * 4)
+#define get_pipetrn_addr(pipenum)	(PIPE1TRN + (pipenum - 1) * 4)
+
+#define enable_irq_ready(r8a66597, pipenum)	\
+	enable_pipe_irq(r8a66597, pipenum, BRDYENB)
+#define disable_irq_ready(r8a66597, pipenum)	\
+	disable_pipe_irq(r8a66597, pipenum, BRDYENB)
+#define enable_irq_empty(r8a66597, pipenum)	\
+	enable_pipe_irq(r8a66597, pipenum, BEMPENB)
+#define disable_irq_empty(r8a66597, pipenum)	\
+	disable_pipe_irq(r8a66597, pipenum, BEMPENB)
+#define enable_irq_nrdy(r8a66597, pipenum)	\
+	enable_pipe_irq(r8a66597, pipenum, NRDYENB)
+#define disable_irq_nrdy(r8a66597, pipenum)	\
+	disable_pipe_irq(r8a66597, pipenum, NRDYENB)
+
+#endif	/* __R8A66597_H__ */
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/rndis.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/rndis.c
new file mode 100755
index 0000000..e2d2e3b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/rndis.c
@@ -0,0 +1,1349 @@
+/*
+ * RNDIS MSG parser
+ *
+ * Authors:	Benedikt Spranger, Pengutronix
+ *		Robert Schwebel, Pengutronix
+ *
+ *              This program is free software; you can redistribute it and/or
+ *              modify it under the terms of the GNU General Public License
+ *              version 2, as published by the Free Software Foundation.
+ *
+ *		This software was originally developed in conformance with
+ *		Microsoft's Remote NDIS Specification License Agreement.
+ *
+ * 03/12/2004 Kai-Uwe Bloem <linux-development@auerswald.de>
+ *		Fixed message length bug in init_response
+ *
+ * 03/25/2004 Kai-Uwe Bloem <linux-development@auerswald.de>
+ *		Fixed rndis_rm_hdr length bug.
+ *
+ * Copyright (C) 2004 by David Brownell
+ *		updates to merge with Linux 2.6, better match RNDIS spec
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/netdevice.h>
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+
+#undef	VERBOSE_DEBUG
+
+#include "rndis.h"
+#include "u_ether.h"
+
+
+/* The driver for your USB chip needs to support ep0 OUT to work with
+ * RNDIS, plus all three CDC Ethernet endpoints (interrupt not optional).
+ *
+ * Windows hosts need an INF file like Documentation/usb/linux.inf
+ * and will be happier if you provide the host_addr module parameter.
+ */
+
+#if 0
+static int rndis_debug = 0;
+module_param (rndis_debug, int, 0);
+MODULE_PARM_DESC (rndis_debug, "enable debugging");
+#else
+#define rndis_debug		0
+#endif
+extern unsigned int get_panic_flag(void);
+
+#define RNDIS_MAX_CONFIGS	1
+extern void usb_notify_up(usb_notify_event notify_type, void* puf);
+
+
+static rndis_params rndis_per_dev_params[RNDIS_MAX_CONFIGS];
+
+/* Driver Version */
+static const __le32 rndis_driver_version = cpu_to_le32(1);
+
+/* Function Prototypes */
+static rndis_resp_t *rndis_add_response(int configNr, u32 length);
+
+
+/* supported OIDs */
+static const u32 oid_supported_list[] =
+{
+	/* the general stuff */
+	OID_GEN_SUPPORTED_LIST,
+	OID_GEN_HARDWARE_STATUS,
+	OID_GEN_MEDIA_SUPPORTED,
+	OID_GEN_MEDIA_IN_USE,
+	OID_GEN_MAXIMUM_FRAME_SIZE,
+	OID_GEN_LINK_SPEED,
+	OID_GEN_TRANSMIT_BLOCK_SIZE,
+	OID_GEN_RECEIVE_BLOCK_SIZE,
+	OID_GEN_VENDOR_ID,
+	OID_GEN_VENDOR_DESCRIPTION,
+	OID_GEN_VENDOR_DRIVER_VERSION,
+	OID_GEN_CURRENT_PACKET_FILTER,
+	OID_GEN_MAXIMUM_TOTAL_SIZE,
+	OID_GEN_MEDIA_CONNECT_STATUS,
+	OID_GEN_PHYSICAL_MEDIUM,
+
+/* RNDIS control messages trimmed down to fix unstable throughput during FTP transfers */
+#if 0
+	/* the statistical stuff */
+	OID_GEN_XMIT_OK,
+	OID_GEN_RCV_OK,
+	OID_GEN_XMIT_ERROR,
+	OID_GEN_RCV_ERROR,
+	OID_GEN_RCV_NO_BUFFER,
+#endif
+
+#ifdef	RNDIS_OPTIONAL_STATS
+	OID_GEN_DIRECTED_BYTES_XMIT,
+	OID_GEN_DIRECTED_FRAMES_XMIT,
+	OID_GEN_MULTICAST_BYTES_XMIT,
+	OID_GEN_MULTICAST_FRAMES_XMIT,
+	OID_GEN_BROADCAST_BYTES_XMIT,
+	OID_GEN_BROADCAST_FRAMES_XMIT,
+	OID_GEN_DIRECTED_BYTES_RCV,
+	OID_GEN_DIRECTED_FRAMES_RCV,
+	OID_GEN_MULTICAST_BYTES_RCV,
+	OID_GEN_MULTICAST_FRAMES_RCV,
+	OID_GEN_BROADCAST_BYTES_RCV,
+	OID_GEN_BROADCAST_FRAMES_RCV,
+	OID_GEN_RCV_CRC_ERROR,
+	OID_GEN_TRANSMIT_QUEUE_LENGTH,
+#endif	/* RNDIS_OPTIONAL_STATS */
+
+	/* mandatory 802.3 */
+	/* the general stuff */
+	OID_802_3_PERMANENT_ADDRESS,
+	OID_802_3_CURRENT_ADDRESS,
+	OID_802_3_MULTICAST_LIST,
+	OID_802_3_MAC_OPTIONS,
+	OID_802_3_MAXIMUM_LIST_SIZE,
+
+	/* the statistical stuff */
+	OID_802_3_RCV_ERROR_ALIGNMENT,
+	OID_802_3_XMIT_ONE_COLLISION,
+	OID_802_3_XMIT_MORE_COLLISIONS,
+#ifdef	RNDIS_OPTIONAL_STATS
+	OID_802_3_XMIT_DEFERRED,
+	OID_802_3_XMIT_MAX_COLLISIONS,
+	OID_802_3_RCV_OVERRUN,
+	OID_802_3_XMIT_UNDERRUN,
+	OID_802_3_XMIT_HEARTBEAT_FAILURE,
+	OID_802_3_XMIT_TIMES_CRS_LOST,
+	OID_802_3_XMIT_LATE_COLLISIONS,
+#endif	/* RNDIS_OPTIONAL_STATS */
+
+#ifdef	RNDIS_PM
+	/* PM and wakeup are "mandatory" for USB, but the RNDIS specs
+	 * don't say what they mean ... and the NDIS specs are often
+	 * confusing and/or ambiguous in this context.  (That is, more
+	 * so than their specs for the other OIDs.)
+	 *
+	 * FIXME someone who knows what these should do, please
+	 * implement them!
+	 */
+
+	/* power management */
+	OID_PNP_CAPABILITIES,
+	OID_PNP_QUERY_POWER,
+	OID_PNP_SET_POWER,
+
+#ifdef	RNDIS_WAKEUP
+	/* wake up host */
+	OID_PNP_ENABLE_WAKE_UP,
+	OID_PNP_ADD_WAKE_UP_PATTERN,
+	OID_PNP_REMOVE_WAKE_UP_PATTERN,
+#endif	/* RNDIS_WAKEUP */
+#endif	/* RNDIS_PM */
+};
+
+extern struct eth_dev alloc_dev;
+
+/* NDIS Functions */
+static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
+			       unsigned buf_len, rndis_resp_t *r)
+{
+	int retval = -ENOTSUPP;
+	u32 length = 4;	/* usually */
+	__le32 *outbuf;
+	int i, count;
+	rndis_query_cmplt_type *resp;
+	struct net_device *net;
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *stats;
+
+	if (!r) return -ENOMEM;
+	resp = (rndis_query_cmplt_type *)r->buf;
+
+	if (!resp) return -ENOMEM;
+
+	if (buf_len && rndis_debug > 1) {
+		pr_debug("query OID %08x value, len %d:\n", OID, buf_len);
+		for (i = 0; i < buf_len; i += 16) {
+			pr_debug("%03d: %08x %08x %08x %08x\n", i,
+				get_unaligned_le32(&buf[i]),
+				get_unaligned_le32(&buf[i + 4]),
+				get_unaligned_le32(&buf[i + 8]),
+				get_unaligned_le32(&buf[i + 12]));
+		}
+	}
+
+	/* response goes here, right after the header */
+	outbuf = (__le32 *)&resp[1];
+	resp->InformationBufferOffset = cpu_to_le32(16);
+
+	net = rndis_per_dev_params[configNr].dev;
+	stats = dev_get_stats(net, &temp);
+
+	switch (OID) {
+
+	/* general oids (table 4-1) */
+
+	/* mandatory */
+	case OID_GEN_SUPPORTED_LIST:
+		pr_debug("%s: OID_GEN_SUPPORTED_LIST\n", __func__);
+		length = sizeof(oid_supported_list);
+		count  = length / sizeof(u32);
+		for (i = 0; i < count; i++)
+			outbuf[i] = cpu_to_le32(oid_supported_list[i]);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_GEN_HARDWARE_STATUS:
+		pr_debug("%s: OID_GEN_HARDWARE_STATUS\n", __func__);
+		/* Bogus question!
+		 * Hardware must be ready to receive high level protocols.
+		 * BTW:
+		 * reddite ergo quae sunt Caesaris Caesari
+		 * et quae sunt Dei Deo!
+		 */
+		*outbuf = cpu_to_le32(0);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_GEN_MEDIA_SUPPORTED:
+		pr_debug("%s: OID_GEN_MEDIA_SUPPORTED\n", __func__);
+		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_GEN_MEDIA_IN_USE:
+		pr_debug("%s: OID_GEN_MEDIA_IN_USE\n", __func__);
+		/* one medium, one transport... (maybe you do it better) */
+		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_GEN_MAXIMUM_FRAME_SIZE:
+		pr_debug("%s: OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__);
+		if (rndis_per_dev_params[configNr].dev) {
+			*outbuf = cpu_to_le32(
+				rndis_per_dev_params[configNr].dev->mtu);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case OID_GEN_LINK_SPEED:
+		if (rndis_debug > 1)
+			pr_debug("%s: OID_GEN_LINK_SPEED\n", __func__);
+#if 0
+		if (rndis_per_dev_params[configNr].media_state
+				== NDIS_MEDIA_STATE_DISCONNECTED)
+			*outbuf = cpu_to_le32(0);
+		else
+#endif			
+			*outbuf = cpu_to_le32(
+				rndis_per_dev_params[configNr].speed);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_GEN_TRANSMIT_BLOCK_SIZE:
+		pr_debug("%s: OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__);
+		if (rndis_per_dev_params[configNr].dev) {
+			*outbuf = cpu_to_le32(
+				rndis_per_dev_params[configNr].dev->mtu);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case OID_GEN_RECEIVE_BLOCK_SIZE:
+		pr_debug("%s: OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__);
+		if (rndis_per_dev_params[configNr].dev) {
+			*outbuf = cpu_to_le32(
+				rndis_per_dev_params[configNr].dev->mtu);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case OID_GEN_VENDOR_ID:
+		pr_debug("%s: OID_GEN_VENDOR_ID\n", __func__);
+		*outbuf = cpu_to_le32(
+			rndis_per_dev_params[configNr].vendorID);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_GEN_VENDOR_DESCRIPTION:
+		pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
+		if (rndis_per_dev_params[configNr].vendorDescr) {
+			length = strlen(rndis_per_dev_params[configNr].
+					vendorDescr);
+			memcpy(outbuf,
+				rndis_per_dev_params[configNr].vendorDescr,
+				length);
+		} else {
+			outbuf[0] = 0;
+		}
+		retval = 0;
+		break;
+
+	case OID_GEN_VENDOR_DRIVER_VERSION:
+		pr_debug("%s: OID_GEN_VENDOR_DRIVER_VERSION\n", __func__);
+		/* Created as LE */
+		*outbuf = rndis_driver_version;
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_GEN_CURRENT_PACKET_FILTER:
+		pr_debug("%s: OID_GEN_CURRENT_PACKET_FILTER\n", __func__);
+		*outbuf = cpu_to_le32(*rndis_per_dev_params[configNr].filter);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_GEN_MAXIMUM_TOTAL_SIZE:
+		pr_debug("%s: OID_GEN_MAXIMUM_TOTAL_SIZE\n", __func__);
+		*outbuf = cpu_to_le32(RNDIS_MAX_TOTAL_SIZE);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_GEN_MEDIA_CONNECT_STATUS:
+		if (rndis_debug > 1)
+			pr_debug("%s: OID_GEN_MEDIA_CONNECT_STATUS\n", __func__);
+		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr]
+						.media_state);
+		retval = 0;
+		break;
+
+	case OID_GEN_PHYSICAL_MEDIUM:
+		pr_debug("%s: OID_GEN_PHYSICAL_MEDIUM\n", __func__);
+		*outbuf = cpu_to_le32(0);
+		retval = 0;
+		break;
+
+	/* The RNDIS specification is incomplete/wrong.   Some versions
+	 * of MS-Windows expect OIDs that aren't specified there.  Other
+	 * versions emit undefined RNDIS messages. DOCUMENT ALL THESE!
+	 */
+	case OID_GEN_MAC_OPTIONS:		/* from WinME */
+		pr_debug("%s: OID_GEN_MAC_OPTIONS\n", __func__);
+		*outbuf = cpu_to_le32(
+			  NDIS_MAC_OPTION_RECEIVE_SERIALIZED
+			| NDIS_MAC_OPTION_FULL_DUPLEX);
+		retval = 0;
+		break;
+
+	/* statistics OIDs (table 4-2) */
+
+	/* mandatory */
+	case OID_GEN_XMIT_OK:
+		if (rndis_debug > 1)
+			pr_debug("%s: OID_GEN_XMIT_OK\n", __func__);
+		if (stats) {
+			*outbuf = cpu_to_le32(stats->tx_packets
+				- stats->tx_errors - stats->tx_dropped);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case OID_GEN_RCV_OK:
+		if (rndis_debug > 1)
+			pr_debug("%s: OID_GEN_RCV_OK\n", __func__);
+		if (stats) {
+			*outbuf = cpu_to_le32(stats->rx_packets
+				- stats->rx_errors - stats->rx_dropped);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case OID_GEN_XMIT_ERROR:
+		if (rndis_debug > 1)
+			pr_debug("%s: OID_GEN_XMIT_ERROR\n", __func__);
+		if (stats) {
+			*outbuf = cpu_to_le32(stats->tx_errors);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case OID_GEN_RCV_ERROR:
+		if (rndis_debug > 1)
+			pr_debug("%s: OID_GEN_RCV_ERROR\n", __func__);
+		if (stats) {
+			*outbuf = cpu_to_le32(stats->rx_errors);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case OID_GEN_RCV_NO_BUFFER:
+		pr_debug("%s: OID_GEN_RCV_NO_BUFFER\n", __func__);
+		if (stats) {
+			*outbuf = cpu_to_le32(stats->rx_dropped);
+			retval = 0;
+		}
+		break;
+
+	/* ieee802.3 OIDs (table 4-3) */
+
+	/* mandatory */
+	case OID_802_3_PERMANENT_ADDRESS:
+		pr_debug("%s: OID_802_3_PERMANENT_ADDRESS\n", __func__);
+		if (rndis_per_dev_params[configNr].dev) {
+			length = ETH_ALEN;
+			memcpy(outbuf,
+				rndis_per_dev_params[configNr].host_mac,
+				length);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case OID_802_3_CURRENT_ADDRESS:
+		pr_debug("%s: OID_802_3_CURRENT_ADDRESS\n", __func__);
+		if (rndis_per_dev_params[configNr].dev) {
+			length = ETH_ALEN;
+			memcpy(outbuf,
+				rndis_per_dev_params [configNr].host_mac,
+				length);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case OID_802_3_MULTICAST_LIST:
+		pr_debug("%s: OID_802_3_MULTICAST_LIST\n", __func__);
+		/* Multicast base address only */
+		*outbuf = cpu_to_le32(0xE0000000);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_802_3_MAXIMUM_LIST_SIZE:
+		pr_debug("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __func__);
+		/* Multicast base address only */
+		*outbuf = cpu_to_le32(1);
+		retval = 0;
+		break;
+
+	case OID_802_3_MAC_OPTIONS:
+		pr_debug("%s: OID_802_3_MAC_OPTIONS\n", __func__);
+		*outbuf = cpu_to_le32(0);
+		retval = 0;
+		break;
+
+	/* ieee802.3 statistics OIDs (table 4-4) */
+
+	/* mandatory */
+	case OID_802_3_RCV_ERROR_ALIGNMENT:
+		pr_debug("%s: OID_802_3_RCV_ERROR_ALIGNMENT\n", __func__);
+		if (stats) {
+			*outbuf = cpu_to_le32(stats->rx_frame_errors);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case OID_802_3_XMIT_ONE_COLLISION:
+		pr_debug("%s: OID_802_3_XMIT_ONE_COLLISION\n", __func__);
+		*outbuf = cpu_to_le32(0);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case OID_802_3_XMIT_MORE_COLLISIONS:
+		pr_debug("%s: OID_802_3_XMIT_MORE_COLLISIONS\n", __func__);
+		*outbuf = cpu_to_le32(0);
+		retval = 0;
+		break;
+
+	default:
+		pr_warning("%s: query unknown OID 0x%08X\n",
+			 __func__, OID);
+	}
+	if (retval < 0)
+		length = 0;
+
+	resp->InformationBufferLength = cpu_to_le32(length);
+	r->length = length + sizeof(*resp);
+	resp->MessageLength = cpu_to_le32(r->length);
+	return retval;
+}
+
+static int gen_ndis_set_resp(u8 configNr, u32 OID, u8 *buf, u32 buf_len,
+			     rndis_resp_t *r)
+{
+	rndis_set_cmplt_type *resp;
+	int i, retval = -ENOTSUPP;
+	struct rndis_params *params;
+
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_set_cmplt_type *)r->buf;
+	if (!resp)
+		return -ENOMEM;
+
+	if (buf_len && rndis_debug > 1) {
+		pr_debug("set OID %08x value, len %d:\n", OID, buf_len);
+		for (i = 0; i < buf_len; i += 16) {
+			pr_debug("%03d: %08x %08x %08x %08x\n", i,
+				get_unaligned_le32(&buf[i]),
+				get_unaligned_le32(&buf[i + 4]),
+				get_unaligned_le32(&buf[i + 8]),
+				get_unaligned_le32(&buf[i + 12]));
+		}
+	}
+
+	params = &rndis_per_dev_params[configNr];
+	switch (OID) {
+	case OID_GEN_CURRENT_PACKET_FILTER:
+
+		/* these NDIS_PACKET_TYPE_* bitflags are shared with
+		 * cdc_filter; it's not RNDIS-specific
+		 * NDIS_PACKET_TYPE_x == USB_CDC_PACKET_TYPE_x for x in:
+		 *	PROMISCUOUS, DIRECTED,
+		 *	MULTICAST, ALL_MULTICAST, BROADCAST
+		 */
+		*params->filter = (u16)get_unaligned_le32(buf);
+		pr_debug("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n",
+			__func__, *params->filter);
+
+		/* this call has a significant side effect:  it's
+		 * what makes the packet flow start and stop, like
+		 * activating the CDC Ethernet altsetting.
+		 */
+		retval = 0;
+		if (*params->filter) {
+			/*
+			 * The media_state check was intentionally disabled here:
+			 * any non-zero filter brings the data path up.
+			 */
+			printk("media_state connected\n");
+			params->state = RNDIS_DATA_INITIALIZED;
+			netif_carrier_on(params->dev);
+			gether_uevent_eth_switch(params->dev, 1);
+			if (netif_running(params->dev))
+				netif_wake_queue(params->dev);
+		} else {
+			params->state = RNDIS_INITIALIZED;
+			netif_carrier_off(params->dev);
+			gether_uevent_eth_switch(params->dev, 0);
+			netif_stop_queue(params->dev);
+		}
+		break;
+
+	case OID_802_3_MULTICAST_LIST:
+		/* I think we can ignore this */
+		pr_debug("%s: OID_802_3_MULTICAST_LIST\n", __func__);
+		retval = 0;
+		break;
+
+	default:
+		pr_warning("%s: set unknown OID 0x%08X, size %d\n",
+			 __func__, OID, buf_len);
+	}
+
+	return retval;
+}
+
+/*
+ * Response Functions
+ */
+
+static int rndis_init_response(int configNr, rndis_init_msg_type *buf)
+{
+	rndis_init_cmplt_type *resp;
+	rndis_resp_t *r;
+	struct rndis_params *params = rndis_per_dev_params + configNr;
+
+	if (!params->dev)
+		return -ENOTSUPP;
+
+	r = rndis_add_response(configNr, sizeof(rndis_init_cmplt_type));
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_init_cmplt_type *)r->buf;
+
+	resp->MessageType = cpu_to_le32(REMOTE_NDIS_INITIALIZE_CMPLT);
+	resp->MessageLength = cpu_to_le32(52);
+	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
+	resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
+	resp->MajorVersion = cpu_to_le32(RNDIS_MAJOR_VERSION);
+	resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
+	resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
+	resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
+
+#if 1
+	/* advertise multi-packet aggregation: 1568 bytes per aggregated packet */
+	resp->MaxPacketsPerTransfer = cpu_to_le32(multi_packet_get_maxnum());
+	resp->MaxTransferSize = cpu_to_le32(1568 * multi_packet_get_maxnum());
+#else
+	resp->MaxPacketsPerTransfer = cpu_to_le32(1);
+	resp->MaxTransferSize = cpu_to_le32(
+		  params->dev->mtu
+		+ sizeof(struct ethhdr)
+		+ sizeof(struct rndis_packet_msg_type)
+		+ 22);
+#endif
+	resp->PacketAlignmentFactor = cpu_to_le32(2);
+	resp->AFListOffset = cpu_to_le32(0);
+	resp->AFListSize = cpu_to_le32(0);
+
+	params->resp_avail(params->v);
+	return 0;
+}
+
+static int rndis_query_response(int configNr, rndis_query_msg_type *buf)
+{
+	rndis_query_cmplt_type *resp;
+	rndis_resp_t *r;
+	struct rndis_params *params = rndis_per_dev_params + configNr;
+
+	/* pr_debug("%s: OID = %08X\n", __func__, cpu_to_le32(buf->OID)); */
+	if (!params->dev)
+		return -ENOTSUPP;
+
+	/*
+	 * we need more memory:
+	 * gen_ndis_query_resp expects enough space for
+	 * rndis_query_cmplt_type followed by data.
+	 * oid_supported_list is the largest data reply
+	 */
+	r = rndis_add_response(configNr,
+		sizeof(oid_supported_list) + sizeof(rndis_query_cmplt_type));
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_query_cmplt_type *)r->buf;
+
+	resp->MessageType = cpu_to_le32(REMOTE_NDIS_QUERY_CMPLT);
+	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
+
+	if (gen_ndis_query_resp(configNr, le32_to_cpu(buf->OID),
+			le32_to_cpu(buf->InformationBufferOffset)
+					+ 8 + (u8 *)buf,
+			le32_to_cpu(buf->InformationBufferLength),
+			r)) {
+		/* OID not supported */
+		resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED);
+		resp->MessageLength = cpu_to_le32(sizeof *resp);
+		resp->InformationBufferLength = cpu_to_le32(0);
+		resp->InformationBufferOffset = cpu_to_le32(0);
+	} else
+		resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
+
+	params->resp_avail(params->v);
+	return 0;
+}
+
+static int rndis_set_response(int configNr, rndis_set_msg_type *buf)
+{
+	u32 BufLength, BufOffset;
+	rndis_set_cmplt_type *resp;
+	rndis_resp_t *r;
+	struct rndis_params *params = rndis_per_dev_params + configNr;
+
+	BufLength = le32_to_cpu(buf->InformationBufferLength);
+	BufOffset = le32_to_cpu(buf->InformationBufferOffset);
+	if ((BufLength > RNDIS_MAX_TOTAL_SIZE) ||
+	    (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE))
+		    return -EINVAL;
+
+	r = rndis_add_response(configNr, sizeof(rndis_set_cmplt_type));
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_set_cmplt_type *)r->buf;
+
+#ifdef	VERBOSE_DEBUG
+	{
+		int i;
+
+		pr_debug("%s: Length: %d\n", __func__, BufLength);
+		pr_debug("%s: Offset: %d\n", __func__, BufOffset);
+		pr_debug("%s: InfoBuffer: ", __func__);
+
+		for (i = 0; i < BufLength; i++)
+			pr_debug("%02x ", *(((u8 *)buf) + i + 8 + BufOffset));
+
+		pr_debug("\n");
+	}
+#endif
+
+	resp->MessageType = cpu_to_le32(REMOTE_NDIS_SET_CMPLT);
+	resp->MessageLength = cpu_to_le32(16);
+	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
+	if (gen_ndis_set_resp(configNr, le32_to_cpu(buf->OID),
+			((u8 *)buf) + 8 + BufOffset, BufLength, r))
+		resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED);
+	else
+		resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
+
+	params->resp_avail(params->v);
+	return 0;
+}
+
+static int rndis_reset_response(int configNr, rndis_reset_msg_type *buf)
+{
+	u8 *xbuf = NULL;
+	u32 length = 0;
+
+	/* drain the response queue */
+	while ((xbuf = rndis_get_next_response(configNr, &length)))
+		rndis_free_response(configNr, xbuf);
+
+	/*
+	 * Instead of answering the reset in-band, force a soft replug so
+	 * that the host re-enumerates the device.
+	 */
+	printk("got reset_response, now re-plug out/in\n");
+	usb_notify_up(USB_DEVICE_EXCEPT_RESET, NULL);
+
+#if 0
+	{
+		struct rndis_params *params = &rndis_per_dev_params[configNr];
+		rndis_reset_cmplt_type *resp;
+		rndis_resp_t *r;
+
+		r = rndis_add_response(configNr, sizeof(rndis_reset_cmplt_type));
+		if (!r)
+			return -ENOMEM;
+
+		resp = (rndis_reset_cmplt_type *)r->buf;
+
+		resp->MessageType = cpu_to_le32(REMOTE_NDIS_RESET_CMPLT);
+		resp->MessageLength = cpu_to_le32(16);
+		resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
+		/* resent information */
+		resp->AddressingReset = cpu_to_le32(1);
+
+		params->resp_avail(params->v);
+	}
+#endif
+	return 0;
+}
+
+static int rndis_keepalive_response(int configNr,
+				    rndis_keepalive_msg_type *buf)
+{
+	rndis_keepalive_cmplt_type *resp;
+	rndis_resp_t *r;
+	struct rndis_params *params = rndis_per_dev_params + configNr;
+
+	/* host "should" check only in RNDIS_DATA_INITIALIZED state */
+
+	r = rndis_add_response(configNr, sizeof(rndis_keepalive_cmplt_type));
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_keepalive_cmplt_type *)r->buf;
+
+	resp->MessageType = cpu_to_le32(
+			REMOTE_NDIS_KEEPALIVE_CMPLT);
+	resp->MessageLength = cpu_to_le32(16);
+	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
+	resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
+
+	params->resp_avail(params->v);
+	return 0;
+}
+
+
+/*
+ * Device to Host Comunication
+ */
+static int rndis_indicate_status_msg(int configNr, u32 status)
+{
+	rndis_indicate_status_msg_type *resp;
+	rndis_resp_t *r;
+	struct rndis_params *params = rndis_per_dev_params + configNr;
+
+	if (params->state == RNDIS_UNINITIALIZED)
+		return -ENOTSUPP;
+
+	r = rndis_add_response(configNr,
+				sizeof(rndis_indicate_status_msg_type));
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_indicate_status_msg_type *)r->buf;
+
+	resp->MessageType = cpu_to_le32(REMOTE_NDIS_INDICATE_STATUS_MSG);
+	resp->MessageLength = cpu_to_le32(20);
+	resp->Status = cpu_to_le32(status);
+	resp->StatusBufferLength = cpu_to_le32(0);
+	resp->StatusBufferOffset = cpu_to_le32(0);
+
+	params->resp_avail(params->v);
+	return 0;
+}
+
+int rndis_signal_connect(int configNr)
+{
+
+	rndis_per_dev_params[configNr].media_state
+			= NDIS_MEDIA_STATE_CONNECTED;
+	return rndis_indicate_status_msg(configNr,
+					  RNDIS_STATUS_MEDIA_CONNECT);
+}
+
+int rndis_signal_disconnect(int configNr)
+{
+	rndis_per_dev_params[configNr].media_state
+			= NDIS_MEDIA_STATE_DISCONNECTED;
+	return rndis_indicate_status_msg(configNr,
+					  RNDIS_STATUS_MEDIA_DISCONNECT);
+}
+
+void rndis_uninit(int configNr)
+{
+	u8 *buf;
+	u32 length;
+
+	if (configNr >= RNDIS_MAX_CONFIGS)
+		return;
+	rndis_per_dev_params[configNr].state = RNDIS_UNINITIALIZED;
+
+	/* drain the response queue */
+	while ((buf = rndis_get_next_response(configNr, &length)))
+		rndis_free_response(configNr, buf);
+}
+
+void rndis_set_host_mac(int configNr, const u8 *addr)
+{
+	rndis_per_dev_params[configNr].host_mac = addr;
+}
+
+#ifdef CONFIG_ARCH_ZX297520V3_MDL
+
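+/*
+ * Workaround for this platform: an RNDIS QUERY message occasionally
+ * arrives with its leading 4-byte MessageType missing.  When the parser
+ * hits an unknown message type, it checks whether the dword at offset 8
+ * (where the OID of such a header-less QUERY would sit) matches a
+ * supported query OID; if so, rndis_msg_patch() below shifts the buffer
+ * down by 4 bytes and prepends REMOTE_NDIS_QUERY_MSG so the message can
+ * be handled as a normal query.
+ */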
+int chcek_query_oid(u32 oid)
+{
+	switch (oid) {
+		case OID_GEN_SUPPORTED_LIST:
+		case OID_GEN_HARDWARE_STATUS:
+		case OID_GEN_MEDIA_SUPPORTED:
+		case OID_GEN_MEDIA_IN_USE:
+		case OID_GEN_MAXIMUM_FRAME_SIZE:
+		case OID_GEN_LINK_SPEED:
+		case OID_GEN_TRANSMIT_BLOCK_SIZE:
+		case OID_GEN_RECEIVE_BLOCK_SIZE:
+		case OID_GEN_VENDOR_ID:
+		case OID_GEN_VENDOR_DESCRIPTION:	
+		case OID_GEN_VENDOR_DRIVER_VERSION:
+		case OID_GEN_CURRENT_PACKET_FILTER:
+		case OID_GEN_MAXIMUM_TOTAL_SIZE:
+		case OID_GEN_MEDIA_CONNECT_STATUS:	
+		case OID_GEN_PHYSICAL_MEDIUM:
+		case OID_GEN_MAC_OPTIONS:	
+		case OID_GEN_XMIT_OK:
+		case OID_GEN_RCV_OK:
+		case OID_GEN_XMIT_ERROR:
+		case OID_GEN_RCV_ERROR:
+		case OID_GEN_RCV_NO_BUFFER:
+		case OID_802_3_PERMANENT_ADDRESS:
+		case OID_802_3_CURRENT_ADDRESS:
+		case OID_802_3_MULTICAST_LIST:
+		case OID_802_3_MAXIMUM_LIST_SIZE:
+		case OID_802_3_MAC_OPTIONS:
+		case OID_802_3_RCV_ERROR_ALIGNMENT:
+		case OID_802_3_XMIT_ONE_COLLISION:
+		case OID_802_3_XMIT_MORE_COLLISIONS:
+			printk("oid  match :0x%08x", oid);
+			return 0;
+		default:
+			printk("%s: query unknown OID 0x%08X\n",
+				 __func__, oid);
+		}
+
+	return -1;
+}
+/*
+ * Rebuild a QUERY message that arrived with its 4-byte MessageType
+ * header missing: shift the payload down and prepend the header.
+ */
+int rndis_msg_patch(u8 *buf, int len)
+{
+	u8 *temp_buf;
+	__le32 *temp;
+
+	temp_buf = kmalloc(len, GFP_KERNEL);
+	if (!temp_buf) {
+		printk("rndis_msg_patch malloc fail\n");
+		return -1;
+	}
+	memset(temp_buf, 0, len);
+
+	/* prepend the lost MessageType and shift the rest down by 4 bytes */
+	temp = (__le32 *)temp_buf;
+	*temp = cpu_to_le32(REMOTE_NDIS_QUERY_MSG);
+	memcpy(temp_buf + 4, buf, len - 4);
+	memcpy(buf, temp_buf, len);
+
+	kfree(temp_buf);
+	return 0;
+}
+#endif
+/*
+ * Message Parser
+ */
+int rndis_msg_parser(u8 configNr, u8 *buf)
+{
+	u32 MsgType, MsgLength, tmp_oid;
+	__le32 *tmp;
+	struct rndis_params *params;
+
+	if (!buf)
+		return -ENOMEM;
+
+	tmp = (__le32 *)buf;
+	MsgType   = get_unaligned_le32(tmp++);
+	MsgLength = get_unaligned_le32(tmp++);
+
+	if (configNr >= RNDIS_MAX_CONFIGS){
+		USBSTACK_DBG("%s, %u error", __func__, __LINE__);
+		return -ENOTSUPP;
+	}
+	params = &rndis_per_dev_params[configNr];
+
+	/* NOTE: RNDIS is *EXTREMELY* chatty ... Windows constantly polls for
+	 * rx/tx statistics and link status, in addition to KEEPALIVE traffic
+	 * and normal HC level polling to see if there's any IN traffic.
+	 */
+
+	/* For USB: responses may take up to 10 seconds */
+
+	switch (MsgType) {
+	case REMOTE_NDIS_INITIALIZE_MSG:
+		pr_debug("%s: REMOTE_NDIS_INITIALIZE_MSG\n",
+			__func__);
+			if(params->media_state == NDIS_MEDIA_STATE_CONNECTED && params->state == RNDIS_DATA_INITIALIZED){
+				printk("----gsn--,init msg got again,do nothing\n");
+			} else				
+				params->state = RNDIS_INITIALIZED;
+		return rndis_init_response(configNr,
+					(rndis_init_msg_type *)buf);
+
+	case REMOTE_NDIS_HALT_MSG:
+		pr_debug("%s: REMOTE_NDIS_HALT_MSG\n",
+			__func__);
+		params->state = RNDIS_UNINITIALIZED;
+		if (params->dev) {
+			netif_carrier_off(params->dev);
+			gether_uevent_eth_switch(params->dev, 0);
+			netif_stop_queue(params->dev);
+		}
+		return 0;
+
+	case REMOTE_NDIS_QUERY_MSG:
+		return rndis_query_response(configNr,
+					(rndis_query_msg_type *)buf);
+
+	case REMOTE_NDIS_SET_MSG:
+		return rndis_set_response(configNr,
+					(rndis_set_msg_type *)buf);
+
+	case REMOTE_NDIS_RESET_MSG:
+		pr_debug("%s: REMOTE_NDIS_RESET_MSG\n",
+			__func__);
+		return rndis_reset_response(configNr,
+					(rndis_reset_msg_type *)buf);
+
+	case REMOTE_NDIS_KEEPALIVE_MSG:
+		/* For USB: host does this every 5 seconds */
+		if (rndis_debug > 1)
+			pr_debug("%s: REMOTE_NDIS_KEEPALIVE_MSG\n",
+				__func__);
+		return rndis_keepalive_response(configNr,
+						 (rndis_keepalive_msg_type *)
+						 buf);
+
+	default:
+		/* At least Windows XP emits some undefined RNDIS messages.
+		 * In one case those messages seemed to relate to the host
+		 * suspending itself.
+		 */
+		USBSTACK_DBG("%s: unknown RNDIS message 0x%08X len %d\n",
+			__func__, MsgType, MsgLength);
+		printk("%s: unknown RNDIS message 0x%08X len %d\n",
+			__func__, MsgType, MsgLength);		
+		if (MsgLength > 32)
+			MsgLength = 32;
+		{
+			unsigned i;
+			for (i = 0; i < MsgLength; i += 16) {
+				printk("%03d: "
+					" %02x %02x %02x %02x"
+					" %02x %02x %02x %02x"
+					" %02x %02x %02x %02x"
+					" %02x %02x %02x %02x"
+					"\n",
+					i,
+					buf[i], buf [i+1],
+						buf[i+2], buf[i+3],
+					buf[i+4], buf [i+5],
+						buf[i+6], buf[i+7],
+					buf[i+8], buf [i+9],
+						buf[i+10], buf[i+11],
+					buf[i+12], buf [i+13],
+						buf[i+14], buf[i+15]);
+
+			}
+		}
+
+#ifdef CONFIG_ARCH_ZX297520V3_MDL
+		/* the message may have lost its 4-byte header; try to patch it */
+		tmp_oid = get_unaligned_le32(tmp);
+		if (chcek_query_oid(tmp_oid) == 0) {
+			printk("rndis_msg_parser, patching msg, len:0x%x, oid:0x%x\n",
+				MsgType, tmp_oid);
+			goto rndis_patch;
+		}
+#endif
+		return MsgType;
+	}
+
+#ifdef CONFIG_ARCH_ZX297520V3_MDL
+rndis_patch:
+	/* assume the message lost its 4-byte header; rebuild it and handle it as a QUERY */
+	if (rndis_msg_patch(buf, MsgType) == 0) {
+		return rndis_query_response(configNr,
+					(rndis_query_msg_type *)buf);
+	}
+#endif
+	return -ENOTSUPP;
+}
+
+int rndis_register(void (*resp_avail)(void *v), void *v)
+{
+	u8 i;
+
+	if (!resp_avail)
+		return -EINVAL;
+
+	for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
+		if (!rndis_per_dev_params[i].used) {
+			rndis_per_dev_params[i].used = 1;
+			rndis_per_dev_params[i].resp_avail = resp_avail;
+			rndis_per_dev_params[i].v = v;
+			pr_debug("%s: configNr = %d\n", __func__, i);
+			return i;
+		}
+	}
+	pr_debug("failed\n");
+
+	return -ENODEV;
+}
+
+void rndis_deregister(int configNr)
+{
+	pr_debug("%s:\n", __func__);
+
+	if (configNr >= RNDIS_MAX_CONFIGS) return;
+	rndis_per_dev_params[configNr].used = 0;
+}
+
+int rndis_set_param_dev(u8 configNr, struct net_device *dev, u16 *cdc_filter)
+{
+	pr_debug("%s:\n", __func__);
+	if (!dev)
+		return -EINVAL;
+	if (configNr >= RNDIS_MAX_CONFIGS) return -1;
+
+	rndis_per_dev_params[configNr].dev = dev;
+	rndis_per_dev_params[configNr].filter = cdc_filter;
+
+	return 0;
+}
+
+int rndis_set_param_vendor(u8 configNr, u32 vendorID, const char *vendorDescr)
+{
+	pr_debug("%s:\n", __func__);
+	if (!vendorDescr) return -1;
+	if (configNr >= RNDIS_MAX_CONFIGS) return -1;
+
+	rndis_per_dev_params[configNr].vendorID = vendorID;
+	rndis_per_dev_params[configNr].vendorDescr = vendorDescr;
+
+	return 0;
+}
+
+int rndis_set_param_medium(u8 configNr, u32 medium, u32 speed)
+{
+	pr_debug("%s: %u %u\n", __func__, medium, speed);
+	if (configNr >= RNDIS_MAX_CONFIGS) return -1;
+
+	rndis_per_dev_params[configNr].medium = medium;
+	//set speed in rndis_init
+	//rndis_per_dev_params[configNr].speed = speed;
+
+	return 0;
+}
+
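+/*
+ * Prepend the 44-byte RNDIS packet header.  DataOffset is counted from
+ * the DataOffset field itself (byte 8 of the message), so a value of 36
+ * places the Ethernet frame right after struct rndis_packet_msg_type.
+ */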
+void rndis_add_hdr(struct sk_buff *skb)
+{
+	struct rndis_packet_msg_type *header;
+
+	if (!skb)
+		return;
+	header = (void *)skb_push(skb, sizeof(*header));
+	memset(header, 0, sizeof *header);
+	header->MessageType = cpu_to_le32(REMOTE_NDIS_PACKET_MSG);
+	header->MessageLength = cpu_to_le32(skb->len);
+	header->DataOffset = cpu_to_le32(36);
+	header->DataLength = cpu_to_le32(skb->len - sizeof(*header));
+}
+
+void rndis_free_response(int configNr, u8 *buf)
+{
+	rndis_resp_t *r;
+	struct list_head *act, *tmp;
+
+	list_for_each_safe(act, tmp,
+			&(rndis_per_dev_params[configNr].resp_queue))
+	{
+		r = list_entry(act, rndis_resp_t, list);
+		if (r->buf == buf) {
+			list_del(&r->list);
+			kfree(r);
+		}
+	}
+}
+
+u8 *rndis_get_next_response(int configNr, u32 *length)
+{
+	rndis_resp_t *r;
+	struct list_head *act, *tmp;
+
+	if (!length) return NULL;
+
+	list_for_each_safe(act, tmp,
+			&(rndis_per_dev_params[configNr].resp_queue))
+	{
+		r = list_entry(act, rndis_resp_t, list);
+		if (!r->send) {
+			r->send = 1;
+			*length = r->length;
+			return r->buf;
+		}
+	}
+
+	return NULL;
+}
+
+static rndis_resp_t *rndis_add_response(int configNr, u32 length)
+{
+	rndis_resp_t *r;
+
+	/* NOTE: this gets copied into ether.c USB_BUFSIZ bytes ... */
+	r = kmalloc(sizeof(rndis_resp_t) + length, GFP_ATOMIC);
+	if (!r) return NULL;
+
+	r->buf = (u8 *)(r + 1);
+	r->length = length;
+	r->send = 0;
+
+	list_add_tail(&r->list,
+		&(rndis_per_dev_params[configNr].resp_queue));
+	return r;
+}
+
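+/*
+ * Strip the RNDIS packet header from a received frame: verify the
+ * MessageType, skb_pull() past DataOffset + 8 bytes to reach the payload
+ * and skb_trim() to DataLength to drop any trailing padding.
+ */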
+int rndis_rm_hdr(struct gether *port,
+			struct sk_buff *skb,
+			struct sk_buff_head *list)
+{
+	/* tmp points to a struct rndis_packet_msg_type */
+	__le32 *tmp = (void *)skb->data;
+
+	/* MessageType, MessageLength */
+	if (cpu_to_le32(REMOTE_NDIS_PACKET_MSG)
+			!= get_unaligned(tmp++)) {
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
+	tmp++;
+
+	/* DataOffset, DataLength */
+	if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
+		dev_kfree_skb_any(skb);
+		return -EOVERFLOW;
+	}
+	skb_trim(skb, get_unaligned_le32(tmp++));
+
+	skb_queue_tail(list, skb);
+	return 0;
+}
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static int rndis_proc_show(struct seq_file *m, void *v)
+{
+	rndis_params *param = m->private;
+
+	seq_printf(m,
+			 "Config Nr. %d\n"
+			 "used      : %s\n"
+			 "state     : %s\n"
+			 "medium    : 0x%08X\n"
+			 "speed     : %d\n"
+			 "cable     : %s\n"
+			 "vendor ID : 0x%08X\n"
+			 "vendor    : %s\n",
+			 param->confignr, (param->used) ? "y" : "n",
+			 ({ char *s = "?";
+			 switch (param->state) {
+			 case RNDIS_UNINITIALIZED:
+				s = "RNDIS_UNINITIALIZED"; break;
+			 case RNDIS_INITIALIZED:
+				s = "RNDIS_INITIALIZED"; break;
+			 case RNDIS_DATA_INITIALIZED:
+				s = "RNDIS_DATA_INITIALIZED"; break;
+			}; s; }),
+			 param->medium,
+			 (param->media_state) ? 0 : param->speed*100,
+			 (param->media_state) ? "disconnected" : "connected",
+			 param->vendorID, param->vendorDescr);
+	return 0;
+}
+
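+/*
+ * proc interface commands: decimal digits accumulate a link speed that is
+ * stored when a non-digit terminator follows; 'C'/'c' signals a media
+ * connect and 'D'/'d' a media disconnect.
+ */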
+static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
+				size_t count, loff_t *ppos)
+{
+	rndis_params *p = PDE(file->f_path.dentry->d_inode)->data;
+	u32 speed = 0;
+	int i, fl_speed = 0;
+
+	for (i = 0; i < count; i++) {
+		char c;
+		if (get_user(c, buffer))
+			return -EFAULT;
+		switch (c) {
+		case '0':
+		case '1':
+		case '2':
+		case '3':
+		case '4':
+		case '5':
+		case '6':
+		case '7':
+		case '8':
+		case '9':
+			fl_speed = 1;
+			speed = speed * 10 + c - '0';
+			break;
+		case 'C':
+		case 'c':
+			rndis_signal_connect(p->confignr);
+			break;
+		case 'D':
+		case 'd':
+			rndis_signal_disconnect(p->confignr);
+			break;
+		default:
+			if (fl_speed) p->speed = speed;
+			else pr_debug("%c is not valid\n", c);
+			break;
+		}
+
+		buffer++;
+	}
+
+	return count;
+}
+
+static int rndis_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rndis_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations rndis_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= rndis_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= rndis_proc_write,
+};
+
+#define	NAME_TEMPLATE "driver/rndis-%03d"
+
+static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
+
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+
+static bool rndis_initialized;
+
+int rndis_init(void)
+{
+	u8 i;
+
+	if (rndis_initialized)
+		return 0;
+
+	for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
+#ifdef	CONFIG_USB_GADGET_DEBUG_FILES
+		char name [20];
+
+		sprintf(name, NAME_TEMPLATE, i);
+		rndis_connect_state[i] = proc_create_data(name, 0660, NULL,
+					&rndis_proc_fops,
+					(void *)(rndis_per_dev_params + i));
+		if (!rndis_connect_state[i]) {
+			pr_debug("%s: remove entries", __func__);
+			while (i) {
+				sprintf(name, NAME_TEMPLATE, --i);
+				remove_proc_entry(name, NULL);
+			}
+			pr_debug("\n");
+			return -EIO;
+		}
+#endif
+		rndis_per_dev_params[i].confignr = i;
+		rndis_per_dev_params[i].used = 0;
+		rndis_per_dev_params[i].state = RNDIS_UNINITIALIZED;
+		rndis_per_dev_params[i].media_state
+				= NDIS_MEDIA_STATE_DISCONNECTED;
+		//set speed in rndis_init
+		rndis_per_dev_params[i].speed = (LTE_CAT4_SPEED/100);
+		INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue));
+	}
+
+	rndis_initialized = true;
+	return 0;
+}
+
+void rndis_exit(void)
+{
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	u8 i;
+	char name[20];
+#endif
+
+	if (!rndis_initialized)
+		return;
+	rndis_initialized = false;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
+		sprintf(name, NAME_TEMPLATE, i);
+		remove_proc_entry(name, NULL);
+	}
+#endif
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/rndis.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/rndis.h
new file mode 100644
index 0000000..f0a7468
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/rndis.h
@@ -0,0 +1,267 @@
+/*
+ * RNDIS	Definitions for Remote NDIS
+ *
+ * Authors:	Benedikt Spranger, Pengutronix
+ *		Robert Schwebel, Pengutronix
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		version 2, as published by the Free Software Foundation.
+ *
+ *		This software was originally developed in conformance with
+ *		Microsoft's Remote NDIS Specification License Agreement.
+ */
+
+#ifndef _LINUX_RNDIS_H
+#define _LINUX_RNDIS_H
+
+#include "ndis.h"
+
+#define RNDIS_MAXIMUM_FRAME_SIZE	1518
+#define RNDIS_MAX_TOTAL_SIZE		1558
+
+/* Remote NDIS Versions */
+#define RNDIS_MAJOR_VERSION		1
+#define RNDIS_MINOR_VERSION		0
+
+/* Status Values */
+#define RNDIS_STATUS_SUCCESS		0x00000000U	/* Success           */
+#define RNDIS_STATUS_FAILURE		0xC0000001U	/* Unspecified error */
+#define RNDIS_STATUS_INVALID_DATA	0xC0010015U	/* Invalid data      */
+#define RNDIS_STATUS_NOT_SUPPORTED	0xC00000BBU	/* Unsupported request */
+#define RNDIS_STATUS_MEDIA_CONNECT	0x4001000BU	/* Device connected  */
+#define RNDIS_STATUS_MEDIA_DISCONNECT	0x4001000CU	/* Device disconnected */
+/* For all not specified status messages:
+ * RNDIS_STATUS_Xxx -> NDIS_STATUS_Xxx
+ */
+
+/* Message Set for Connectionless (802.3) Devices */
+#define REMOTE_NDIS_PACKET_MSG		0x00000001U
+#define REMOTE_NDIS_INITIALIZE_MSG	0x00000002U	/* Initialize device */
+#define REMOTE_NDIS_HALT_MSG		0x00000003U
+#define REMOTE_NDIS_QUERY_MSG		0x00000004U
+#define REMOTE_NDIS_SET_MSG		0x00000005U
+#define REMOTE_NDIS_RESET_MSG		0x00000006U
+#define REMOTE_NDIS_INDICATE_STATUS_MSG	0x00000007U
+#define REMOTE_NDIS_KEEPALIVE_MSG	0x00000008U
+
+/* Message completion */
+#define REMOTE_NDIS_INITIALIZE_CMPLT	0x80000002U
+#define REMOTE_NDIS_QUERY_CMPLT		0x80000004U
+#define REMOTE_NDIS_SET_CMPLT		0x80000005U
+#define REMOTE_NDIS_RESET_CMPLT		0x80000006U
+#define REMOTE_NDIS_KEEPALIVE_CMPLT	0x80000008U
+
+/* Device Flags */
+#define RNDIS_DF_CONNECTIONLESS		0x00000001U
+#define RNDIS_DF_CONNECTION_ORIENTED	0x00000002U
+
+#define RNDIS_MEDIUM_802_3		0x00000000U
+
+/* from drivers/net/sk98lin/h/skgepnmi.h */
+#define OID_PNP_CAPABILITIES			0xFD010100
+#define OID_PNP_SET_POWER			0xFD010101
+#define OID_PNP_QUERY_POWER			0xFD010102
+#define OID_PNP_ADD_WAKE_UP_PATTERN		0xFD010103
+#define OID_PNP_REMOVE_WAKE_UP_PATTERN		0xFD010104
+#define OID_PNP_ENABLE_WAKE_UP			0xFD010106
+
+
+typedef struct rndis_init_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	MajorVersion;
+	__le32	MinorVersion;
+	__le32	MaxTransferSize;
+} rndis_init_msg_type;
+
+typedef struct rndis_init_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	Status;
+	__le32	MajorVersion;
+	__le32	MinorVersion;
+	__le32	DeviceFlags;
+	__le32	Medium;
+	__le32	MaxPacketsPerTransfer;
+	__le32	MaxTransferSize;
+	__le32	PacketAlignmentFactor;
+	__le32	AFListOffset;
+	__le32	AFListSize;
+} rndis_init_cmplt_type;
+
+typedef struct rndis_halt_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+} rndis_halt_msg_type;
+
+typedef struct rndis_query_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	OID;
+	__le32	InformationBufferLength;
+	__le32	InformationBufferOffset;
+	__le32	DeviceVcHandle;
+} rndis_query_msg_type;
+
+typedef struct rndis_query_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	Status;
+	__le32	InformationBufferLength;
+	__le32	InformationBufferOffset;
+} rndis_query_cmplt_type;
+
+typedef struct rndis_set_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	OID;
+	__le32	InformationBufferLength;
+	__le32	InformationBufferOffset;
+	__le32	DeviceVcHandle;
+} rndis_set_msg_type;
+
+typedef struct rndis_set_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	Status;
+} rndis_set_cmplt_type;
+
+typedef struct rndis_reset_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	Reserved;
+} rndis_reset_msg_type;
+
+typedef struct rndis_reset_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	Status;
+	__le32	AddressingReset;
+} rndis_reset_cmplt_type;
+
+typedef struct rndis_indicate_status_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	Status;
+	__le32	StatusBufferLength;
+	__le32	StatusBufferOffset;
+} rndis_indicate_status_msg_type;
+
+typedef struct rndis_keepalive_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+} rndis_keepalive_msg_type;
+
+typedef struct rndis_keepalive_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	Status;
+} rndis_keepalive_cmplt_type;
+
+struct rndis_packet_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	DataOffset;
+	__le32	DataLength;
+	__le32	OOBDataOffset;
+	__le32	OOBDataLength;
+	__le32	NumOOBDataElements;
+	__le32	PerPacketInfoOffset;
+	__le32	PerPacketInfoLength;
+	__le32	VcHandle;
+	__le32	Reserved;
+} __attribute__ ((packed));
+
+struct rndis_config_parameter
+{
+	__le32	ParameterNameOffset;
+	__le32	ParameterNameLength;
+	__le32	ParameterType;
+	__le32	ParameterValueOffset;
+	__le32	ParameterValueLength;
+};
+
+/* implementation specific */
+enum rndis_state
+{
+	RNDIS_UNINITIALIZED,
+	RNDIS_INITIALIZED,
+	RNDIS_DATA_INITIALIZED,
+};
+
+typedef struct rndis_resp_t
+{
+	struct list_head	list;
+	u8			*buf;
+	u32			length;
+	int			send;
+} rndis_resp_t;
+
+typedef struct rndis_params
+{
+	u8			confignr;
+	u8			used;
+	u16			saved_filter;
+	enum rndis_state	state;
+	u32			medium;
+	u32			speed;
+	u32			media_state;
+
+	const u8		*host_mac;
+	u16			*filter;
+	struct net_device	*dev;
+
+	u32			vendorID;
+	const char		*vendorDescr;
+	void			(*resp_avail)(void *v);
+	void			*v;
+	struct list_head	resp_queue;
+} rndis_params;
+
+/* RNDIS Message parser and other useless functions */
+int  rndis_msg_parser (u8 configNr, u8 *buf);
+int  rndis_register(void (*resp_avail)(void *v), void *v);
+void rndis_deregister (int configNr);
+int  rndis_set_param_dev (u8 configNr, struct net_device *dev,
+			 u16 *cdc_filter);
+int  rndis_set_param_vendor (u8 configNr, u32 vendorID,
+			    const char *vendorDescr);
+int  rndis_set_param_medium (u8 configNr, u32 medium, u32 speed);
+void rndis_add_hdr (struct sk_buff *skb);
+int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
+			struct sk_buff_head *list);
+u8   *rndis_get_next_response (int configNr, u32 *length);
+void rndis_free_response (int configNr, u8 *buf);
+
+void rndis_uninit (int configNr);
+int  rndis_signal_connect (int configNr);
+int  rndis_signal_disconnect (int configNr);
+int  rndis_state (int configNr);
+extern void rndis_set_host_mac (int configNr, const u8 *addr);
+int rndis_init(void);
+void rndis_exit (void);
+
+#endif  /* _LINUX_RNDIS_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/s3c-hsotg.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/s3c-hsotg.c
new file mode 100644
index 0000000..105b206
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/s3c-hsotg.c
@@ -0,0 +1,3478 @@
+/* linux/drivers/usb/gadget/s3c-hsotg.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *      Ben Dooks <ben@simtec.co.uk>
+ *      http://armlinux.simtec.co.uk/
+ *
+ * S3C USB2.0 High-speed / OtG driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include <mach/map.h>
+
+#include <plat/regs-usb-hsotg-phy.h>
+#include <plat/regs-usb-hsotg.h>
+#include <mach/regs-sys.h>
+#include <plat/udc-hs.h>
+#include <plat/cpu.h>
+
+#define DMA_ADDR_INVALID (~((dma_addr_t)0))
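+/* marks a request whose buffer has not been mapped for DMA yet */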
+
+/* EP0_MPS_LIMIT
+ *
+ * Unfortunately there seems to be a limit of the amount of data that can
+ * be transferred by IN transactions on EP0. This is either 127 bytes or 3
+ * packets (which practically means 1 packet and 63 bytes of data) when the
+ * MPS is set to 64.
+ *
+ * This means if we are wanting to move >127 bytes of data, we need to
+ * split the transactions up, but just doing one packet at a time does
+ * not work (this may be an implicit DATA0 PID on first packet of the
+ * transaction) and doing 2 packets is outside the controller's limits.
+ *
+ * If we try to lower the MPS size for EP0, then no transfers work properly
+ * for EP0, and the system will fail basic enumeration. As no cause for this
+ * has currently been found, we cannot support any large IN transfers for
+ * EP0.
+ */
+#define EP0_MPS_LIMIT	64
+
+struct s3c_hsotg;
+struct s3c_hsotg_req;
+
+/**
+ * struct s3c_hsotg_ep - driver endpoint definition.
+ * @ep: The gadget layer representation of the endpoint.
+ * @name: The driver generated name for the endpoint, as passed to the USB core.
+ * @queue: Queue of requests for this endpoint.
+ * @parent: Reference back to the parent device structure.
+ * @req: The current request that the endpoint is processing. This is
+ *       used to indicate that a request has been loaded onto the endpoint
+ *       and has yet to be completed (maybe due to data move, or simply
+ *       awaiting an ack from the core that all the data has been completed).
+ * @debugfs: File entry for debugfs file for this endpoint.
+ * @lock: State lock to protect contents of endpoint.
+ * @dir_in: Set to true if this endpoint is of the IN direction, which
+ *	    means that it is sending data to the Host.
+ * @index: The index for the endpoint registers.
+ * @halted: Set if the endpoint has been halted.
+ * @periodic: Set if this is a periodic ep, such as Interrupt
+ * @sent_zlp: Set if we've sent a zero-length packet.
+ * @total_data: The total number of data bytes done.
+ * @fifo_size: The size of the FIFO (for periodic IN endpoints)
+ * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
+ * @last_load: The offset of data for the last start of request.
+ * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
+ *
 * This is the driver's state for each registered endpoint, allowing it
+ * to keep track of transactions that need doing. Each endpoint has a
+ * lock to protect the state, to try and avoid using an overall lock
+ * for the host controller as much as possible.
+ *
+ * For periodic IN endpoints, we have fifo_size and fifo_load to try
+ * and keep track of the amount of data in the periodic FIFO for each
+ * of these as we don't have a status register that tells us how much
+ * is in each of them. (note, this may actually be useless information
+ * as in shared-fifo mode a periodic IN endpoint acts more like a
+ * single-frame packet buffer than a FIFO)
+ */
+struct s3c_hsotg_ep {
+	struct usb_ep		ep;
+	struct list_head	queue;
+	struct s3c_hsotg	*parent;
+	struct s3c_hsotg_req	*req;
+	struct dentry		*debugfs;
+
+	spinlock_t		lock;
+
+	unsigned long		total_data;
+	unsigned int		size_loaded;
+	unsigned int		last_load;
+	unsigned int		fifo_load;
+	unsigned short		fifo_size;
+
+	unsigned char		dir_in;
+	unsigned char		index;
+
+	unsigned int		halted:1;
+	unsigned int		periodic:1;
+	unsigned int		sent_zlp:1;
+
+	char			name[10];
+};
+
+#define S3C_HSOTG_EPS	(8+1)	/* limit to 9 for the moment */
+
+/**
+ * struct s3c_hsotg - driver state.
+ * @dev: The parent device supplied to the probe function
+ * @driver: USB gadget driver
+ * @plat: The platform specific configuration data.
+ * @regs: The memory area mapped for accessing registers.
+ * @regs_res: The resource that was allocated when claiming register space.
+ * @irq: The IRQ number we are using
+ * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.
+ * @debug_root: root directory for debugfs.
+ * @debug_file: main status file for debugfs.
+ * @debug_fifo: FIFO status file for debugfs.
+ * @ep0_reply: Request used for ep0 reply.
+ * @ep0_buff: Buffer for EP0 reply data, if needed.
+ * @ctrl_buff: Buffer for EP0 control requests.
+ * @ctrl_req: Request for EP0 control packets.
+ * @eps: The endpoints being supplied to the gadget framework
+ */
+struct s3c_hsotg {
+	struct device		 *dev;
+	struct usb_gadget_driver *driver;
+	struct s3c_hsotg_plat	 *plat;
+
+	void __iomem		*regs;
+	struct resource		*regs_res;
+	int			irq;
+	struct clk		*clk;
+
+	unsigned int		dedicated_fifos:1;
+
+	struct dentry		*debug_root;
+	struct dentry		*debug_file;
+	struct dentry		*debug_fifo;
+
+	struct usb_request	*ep0_reply;
+	struct usb_request	*ctrl_req;
+	u8			ep0_buff[8];
+	u8			ctrl_buff[8];
+
+	struct usb_gadget	gadget;
+	struct s3c_hsotg_ep	eps[];
+};
+
+/**
+ * struct s3c_hsotg_req - data transfer request
+ * @req: The USB gadget request
+ * @queue: The list of requests for the endpoint this is queued for.
+ * @in_progress: Has already had size/packets written to core
+ * @mapped: DMA buffer for this request has been mapped via dma_map_single().
+ */
+struct s3c_hsotg_req {
+	struct usb_request	req;
+	struct list_head	queue;
+	unsigned char		in_progress;
+	unsigned char		mapped;
+};
+
+/* conversion functions */
+static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
+{
+	return container_of(req, struct s3c_hsotg_req, req);
+}
+
+static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
+{
+	return container_of(ep, struct s3c_hsotg_ep, ep);
+}
+
+static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget)
+{
+	return container_of(gadget, struct s3c_hsotg, gadget);
+}
+
+static inline void __orr32(void __iomem *ptr, u32 val)
+{
+	writel(readl(ptr) | val, ptr);
+}
+
+static inline void __bic32(void __iomem *ptr, u32 val)
+{
+	writel(readl(ptr) & ~val, ptr);
+}
+
+/* forward declaration of functions */
+static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);
+
+/**
+ * using_dma - return the DMA status of the driver.
+ * @hsotg: The driver state.
+ *
+ * Return true if we're using DMA.
+ *
+ * Currently, we have the DMA support code worked into everywhere
+ * that needs it, but the AMBA DMA implementation in the hardware can
+ * only DMA from 32bit aligned addresses. This means that gadgets such
+ * as the CDC Ethernet cannot work as they often pass packets which are
+ * not 32bit aligned.
+ *
+ * Unfortunately the choice to use DMA or not is global to the controller
+ * and seems to be only settable when the controller is being put through
+ * a core reset. This means we either need to fix the gadgets to take
+ * account of DMA alignment, or add bounce buffers (yuerk).
+ *
+ * Until this issue is sorted out, we always return 'false'.
+ */
+static inline bool using_dma(struct s3c_hsotg *hsotg)
+{
+	return false;	/* support is not complete */
+}
+
+/**
+ * s3c_hsotg_en_gsint - enable one or more of the general interrupts
+ * @hsotg: The device state
+ * @ints: A bitmask of the interrupts to enable
+ */
+static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints)
+{
+	u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);
+	u32 new_gsintmsk;
+
+	new_gsintmsk = gsintmsk | ints;
+
+	if (new_gsintmsk != gsintmsk) {
+		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
+		writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);
+	}
+}
+
+/**
+ * s3c_hsotg_disable_gsint - disable one or more of the general interrupts
+ * @hsotg: The device state
+ * @ints: A bitmask of the interrupts to disable
+ */
+static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints)
+{
+	u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);
+	u32 new_gsintmsk;
+
+	new_gsintmsk = gsintmsk & ~ints;
+
+	if (new_gsintmsk != gsintmsk)
+		writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);
+}
+
+/**
+ * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq
+ * @hsotg: The device state
+ * @ep: The endpoint index
+ * @dir_in: True if direction is in.
+ * @en: The enable value, true to enable
+ *
+ * Set or clear the mask for an individual endpoint's interrupt
+ * request.
+ */
+static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg,
+				 unsigned int ep, unsigned int dir_in,
+				 unsigned int en)
+{
+	unsigned long flags;
+	u32 bit = 1 << ep;
+	u32 daint;
+
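+	/* OUT endpoint interrupt bits occupy the upper 16 bits of DAINTMSK */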
+	if (!dir_in)
+		bit <<= 16;
+
+	local_irq_save(flags);
+	daint = readl(hsotg->regs + S3C_DAINTMSK);
+	if (en)
+		daint |= bit;
+	else
+		daint &= ~bit;
+	writel(daint, hsotg->regs + S3C_DAINTMSK);
+	local_irq_restore(flags);
+}
+
+/**
+ * s3c_hsotg_init_fifo - initialise non-periodic FIFOs
+ * @hsotg: The device instance.
+ */
+static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
+{
+	unsigned int ep;
+	unsigned int addr;
+	unsigned int size;
+	int timeout;
+	u32 val;
+
+	/* the ryu 2.6.24 release has
+	   writel(0x1C0, hsotg->regs + S3C_GRXFSIZ);
+	   writel(S3C_GNPTXFSIZ_NPTxFStAddr(0x200) |
+		S3C_GNPTXFSIZ_NPTxFDep(0x1C0),
+		hsotg->regs + S3C_GNPTXFSIZ);
+	*/
+
+	/* set FIFO sizes to 2048/1024 */
+
+	writel(2048, hsotg->regs + S3C_GRXFSIZ);
+	writel(S3C_GNPTXFSIZ_NPTxFStAddr(2048) |
+	       S3C_GNPTXFSIZ_NPTxFDep(1024),
+	       hsotg->regs + S3C_GNPTXFSIZ);
+
+	/* arrange all the rest of the TX FIFOs, as some versions of this
+	 * block have overlapping default addresses. This also ensures
+	 * that if the settings have been changed, then they are set to
+	 * known values. */
+
+	/* start at the end of the GNPTXFSIZ, rounded up */
+	addr = 2048 + 1024;
+	size = 768;
+
+	/* currently we allocate TX FIFOs for all possible endpoints,
+	 * and assume that they are all the same size. */
+
+	for (ep = 1; ep <= 15; ep++) {
+		val = addr;
+		val |= size << S3C_DPTXFSIZn_DPTxFSize_SHIFT;
+		addr += size;
+
+		writel(val, hsotg->regs + S3C_DPTXFSIZn(ep));
+	}
+
+	/* according to p428 of the design guide, we need to ensure that
+	 * all fifos are flushed before continuing */
+
+	writel(S3C_GRSTCTL_TxFNum(0x10) | S3C_GRSTCTL_TxFFlsh |
+	       S3C_GRSTCTL_RxFFlsh, hsotg->regs + S3C_GRSTCTL);
+
+	/* wait until the fifos are both flushed */
+	timeout = 100;
+	while (1) {
+		val = readl(hsotg->regs + S3C_GRSTCTL);
+
+		if ((val & (S3C_GRSTCTL_TxFFlsh | S3C_GRSTCTL_RxFFlsh)) == 0)
+			break;
+
+		if (--timeout == 0) {
+			dev_err(hsotg->dev,
+				"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
+				__func__, val);
+			break;
+		}
+
+		udelay(1);
+	}
+
+	dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
+}
+
+/**
+ * s3c_hsotg_ep_alloc_request - allocate a new USB request for an endpoint
+ * @ep: USB endpoint to allocate request for.
+ * @flags: Allocation flags
+ *
+ * Allocate a new USB request structure appropriate for the specified endpoint
+ */
+static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,
+						      gfp_t flags)
+{
+	struct s3c_hsotg_req *req;
+
+	req = kzalloc(sizeof(struct s3c_hsotg_req), flags);
+	if (!req)
+		return NULL;
+
+	INIT_LIST_HEAD(&req->queue);
+
+	req->req.dma = DMA_ADDR_INVALID;
+	return &req->req;
+}
+
+/**
+ * is_ep_periodic - return true if the endpoint is in periodic mode.
+ * @hs_ep: The endpoint to query.
+ *
+ * Returns true if the endpoint is in periodic mode, meaning it is being
+ * used for an Interrupt or ISO transfer.
+ */
+static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)
+{
+	return hs_ep->periodic;
+}
+
+/**
+ * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint for the request
+ * @hs_req: The request being processed.
+ *
+ * This is the reverse of s3c_hsotg_map_dma(), called for the completion
+ * of a request to ensure the buffer is ready for access by the caller.
+*/
+static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
+				struct s3c_hsotg_ep *hs_ep,
+				struct s3c_hsotg_req *hs_req)
+{
+	struct usb_request *req = &hs_req->req;
+	enum dma_data_direction dir;
+
+	dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	/* ignore this if we're not moving any data */
+	if (hs_req->req.length == 0)
+		return;
+
+	if (hs_req->mapped) {
+		/* we mapped this, so unmap and remove the dma */
+
+		dma_unmap_single(hsotg->dev, req->dma, req->length, dir);
+
+		req->dma = DMA_ADDR_INVALID;
+		hs_req->mapped = 0;
+	} else {
+		dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
+	}
+}
+
+/**
+ * s3c_hsotg_write_fifo - write packet Data to the TxFIFO
+ * @hsotg: The controller state.
+ * @hs_ep: The endpoint we're going to write for.
+ * @hs_req: The request to write data for.
+ *
+ * This is called when the TxFIFO has some space in it to hold a new
+ * transmission and we have something to give it. The actual setup of
+ * the data size is done elsewhere, so all we have to do is to actually
+ * write the data.
+ *
+ * The return value is zero if there is more space (or nothing was done)
+ * otherwise -ENOSPC is returned if the FIFO space was used up.
+ *
+ * This routine is only needed for PIO
+*/
+static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
+				struct s3c_hsotg_ep *hs_ep,
+				struct s3c_hsotg_req *hs_req)
+{
+	bool periodic = is_ep_periodic(hs_ep);
+	u32 gnptxsts = readl(hsotg->regs + S3C_GNPTXSTS);
+	int buf_pos = hs_req->req.actual;
+	int to_write = hs_ep->size_loaded;
+	void *data;
+	int can_write;
+	int pkt_round;
+
+	to_write -= (buf_pos - hs_ep->last_load);
+
+	/* if there's nothing to write, get out early */
+	if (to_write == 0)
+		return 0;
+
+	if (periodic && !hsotg->dedicated_fifos) {
+		u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
+		int size_left;
+		int size_done;
+
+		/* work out how much data was loaded so we can calculate
+		 * how much data is left in the fifo. */
+
+		size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
+
+		/* if shared fifo, we cannot write anything until the
+		 * previous data has been completely sent.
+		 */
+		if (hs_ep->fifo_load != 0) {
+			s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
+			return -ENOSPC;
+		}
+
+		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
+			__func__, size_left,
+			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
+
+		/* how much of the data has moved */
+		size_done = hs_ep->size_loaded - size_left;
+
+		/* how much data is left in the fifo */
+		can_write = hs_ep->fifo_load - size_done;
+		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
+			__func__, can_write);
+
+		can_write = hs_ep->fifo_size - can_write;
+		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
+			__func__, can_write);
+
+		if (can_write <= 0) {
+			s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
+			return -ENOSPC;
+		}
+	} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
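+		/* DTXFSTS reports the free space in the dedicated IN
+		 * FIFO in 32-bit words */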
+		can_write = readl(hsotg->regs + S3C_DTXFSTS(hs_ep->index));
+
+		can_write &= 0xffff;
+		can_write *= 4;
+	} else {
+		if (S3C_GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) {
+			dev_dbg(hsotg->dev,
+				"%s: no queue slots available (0x%08x)\n",
+				__func__, gnptxsts);
+
+			s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
+			return -ENOSPC;
+		}
+
+		can_write = S3C_GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts);
+		can_write *= 4;	/* fifo size is in 32bit quantities. */
+	}
+
+	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n",
+		 __func__, gnptxsts, can_write, to_write, hs_ep->ep.maxpacket);
+
+	/* limit to 512 bytes of data, it seems at least on the non-periodic
+	 * FIFO, requests of >512 cause the endpoint to get stuck with a
+	 * fragment of the end of the transfer in it.
+	 */
+	if (can_write > 512)
+		can_write = 512;
+
+	/* limit the write to one max-packet size worth of data, but allow
+	 * the transfer to return that it did not run out of fifo space
+	 * doing it. */
+	if (to_write > hs_ep->ep.maxpacket) {
+		to_write = hs_ep->ep.maxpacket;
+
+		s3c_hsotg_en_gsint(hsotg,
+				   periodic ? S3C_GINTSTS_PTxFEmp :
+				   S3C_GINTSTS_NPTxFEmp);
+	}
+
+	/* see if we can write data */
+
+	if (to_write > can_write) {
+		to_write = can_write;
+		pkt_round = to_write % hs_ep->ep.maxpacket;
+
+		/* Not sure, but we probably shouldn't be writing partial
+		 * packets into the FIFO, so round the write down to an
+		 * exact number of packets.
+		 *
+		 * Note, we do not currently check to see if we can ever
+		 * write a full packet or not to the FIFO.
+		 */
+
+		if (pkt_round)
+			to_write -= pkt_round;
+
+		/* enable correct FIFO interrupt to alert us when there
+		 * is more room left. */
+
+		s3c_hsotg_en_gsint(hsotg,
+				   periodic ? S3C_GINTSTS_PTxFEmp :
+				   S3C_GINTSTS_NPTxFEmp);
+	}
+
+	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
+		 to_write, hs_req->req.length, can_write, buf_pos);
+
+	if (to_write <= 0)
+		return -ENOSPC;
+
+	hs_req->req.actual = buf_pos + to_write;
+	hs_ep->total_data += to_write;
+
+	if (periodic)
+		hs_ep->fifo_load += to_write;
+
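+	/* the FIFO is written in 32-bit words, so round the byte count up */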
+	to_write = DIV_ROUND_UP(to_write, 4);
+	data = hs_req->req.buf + buf_pos;
+
+	writesl(hsotg->regs + S3C_EPFIFO(hs_ep->index), data, to_write);
+
+	return (to_write >= can_write) ? -ENOSPC : 0;
+}
+
+/**
+ * get_ep_limit - get the maximum data length for this endpoint
+ * @hs_ep: The endpoint
+ *
+ * Return the maximum data that can be queued in one go on a given endpoint
+ * so that transfers that are too long can be split.
+ */
+static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
+{
+	int index = hs_ep->index;
+	unsigned maxsize;
+	unsigned maxpkt;
+
+	if (index != 0) {
+		maxsize = S3C_DxEPTSIZ_XferSize_LIMIT + 1;
+		maxpkt = S3C_DxEPTSIZ_PktCnt_LIMIT + 1;
+	} else {
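+		/* EP0: presumably two 64-byte packets, in line with the
+		 * EP0_MPS_LIMIT note above */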
+		maxsize = 64+64;
+		if (hs_ep->dir_in)
+			maxpkt = S3C_DIEPTSIZ0_PktCnt_LIMIT + 1;
+		else
+			maxpkt = 2;
+	}
+
+	/* we made the constant loading easier above by using +1 */
+	maxpkt--;
+	maxsize--;
+
+	/* constrain by packet count if maxpkts*pktsize is greater
+	 * than the length register size. */
+
+	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
+		maxsize = maxpkt * hs_ep->ep.maxpacket;
+
+	return maxsize;
+}
+
+/**
+ * s3c_hsotg_start_req - start a USB request from an endpoint's queue
+ * @hsotg: The controller state.
+ * @hs_ep: The endpoint to process a request for
+ * @hs_req: The request to start.
+ * @continuing: True if we are doing more for the current request.
+ *
+ * Start the given request running by setting the endpoint registers
+ * appropriately, and writing any data to the FIFOs.
+ */
+static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
+				struct s3c_hsotg_ep *hs_ep,
+				struct s3c_hsotg_req *hs_req,
+				bool continuing)
+{
+	struct usb_request *ureq = &hs_req->req;
+	int index = hs_ep->index;
+	int dir_in = hs_ep->dir_in;
+	u32 epctrl_reg;
+	u32 epsize_reg;
+	u32 epsize;
+	u32 ctrl;
+	unsigned length;
+	unsigned packets;
+	unsigned maxreq;
+
+	if (index != 0) {
+		if (hs_ep->req && !continuing) {
+			dev_err(hsotg->dev, "%s: active request\n", __func__);
+			WARN_ON(1);
+			return;
+		} else if (hs_ep->req != hs_req && continuing) {
+			dev_err(hsotg->dev,
+				"%s: continue different req\n", __func__);
+			WARN_ON(1);
+			return;
+		}
+	}
+
+	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
+	epsize_reg = dir_in ? S3C_DIEPTSIZ(index) : S3C_DOEPTSIZ(index);
+
+	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
+		__func__, readl(hsotg->regs + epctrl_reg), index,
+		hs_ep->dir_in ? "in" : "out");
+
+	/* If endpoint is stalled, we will restart request later */
+	ctrl = readl(hsotg->regs + epctrl_reg);
+
+	if (ctrl & S3C_DxEPCTL_Stall) {
+		dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
+		return;
+	}
+
+	length = ureq->length - ureq->actual;
+
+	if (0)
+		dev_dbg(hsotg->dev,
+			"REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",
+			ureq->buf, length, ureq->dma,
+			ureq->no_interrupt, ureq->zero, ureq->short_not_ok);
+
+	maxreq = get_ep_limit(hs_ep);
+	if (length > maxreq) {
+		int round = maxreq % hs_ep->ep.maxpacket;
+
+		dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
+			__func__, length, maxreq, round);
+
+		/* round down to multiple of packets */
+		if (round)
+			maxreq -= round;
+
+		length = maxreq;
+	}
+
+	if (length)
+		packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
+	else
+		packets = 1;	/* send one packet if length is zero. */
+
+	if (dir_in && index != 0)
+		epsize = S3C_DxEPTSIZ_MC(1);
+	else
+		epsize = 0;
+
+	if (index != 0 && ureq->zero) {
+		/* test for the packets being exactly right for the
+		 * transfer */
+
+		if (length == (packets * hs_ep->ep.maxpacket))
+			packets++;
+	}
+
+	epsize |= S3C_DxEPTSIZ_PktCnt(packets);
+	epsize |= S3C_DxEPTSIZ_XferSize(length);
+
+	dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
+		__func__, packets, length, ureq->length, epsize, epsize_reg);
+
+	/* store the request as the current one we're doing */
+	hs_ep->req = hs_req;
+
+	/* write size / packets */
+	writel(epsize, hsotg->regs + epsize_reg);
+
+	if (using_dma(hsotg) && !continuing) {
+		unsigned int dma_reg;
+
+		/* write DMA address to control register, buffer already
+		 * synced by s3c_hsotg_ep_queue().  */
+
+		dma_reg = dir_in ? S3C_DIEPDMA(index) : S3C_DOEPDMA(index);
+		writel(ureq->dma, hsotg->regs + dma_reg);
+
+		dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",
+			__func__, ureq->dma, dma_reg);
+	}
+
+	ctrl |= S3C_DxEPCTL_EPEna;	/* ensure ep enabled */
+	ctrl |= S3C_DxEPCTL_USBActEp;
+	ctrl |= S3C_DxEPCTL_CNAK;	/* clear NAK set by core */
+
+	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
+	writel(ctrl, hsotg->regs + epctrl_reg);
+
+	/* set these, it seems that DMA support increments past the end
+	 * of the packet buffer so we need to calculate the length from
+	 * this information. */
+	hs_ep->size_loaded = length;
+	hs_ep->last_load = ureq->actual;
+
+	if (dir_in && !using_dma(hsotg)) {
+		/* set these anyway, we may need them for non-periodic in */
+		hs_ep->fifo_load = 0;
+
+		s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
+	}
+
+	/* clear the INTknTXFEmpMsk when we start a request, more as an aid
+	 * to debugging to see what is going on. */
+	if (dir_in)
+		writel(S3C_DIEPMSK_INTknTXFEmpMsk,
+		       hsotg->regs + S3C_DIEPINT(index));
+
+	/* Note, trying to clear the NAK here causes problems with transmit
+	 * on the S3C6400 ending up with the TXFIFO becoming full. */
+
+	/* check ep is enabled */
+	if (!(readl(hsotg->regs + epctrl_reg) & S3C_DxEPCTL_EPEna))
+		dev_warn(hsotg->dev,
+			 "ep%d: failed to become enabled (DxEPCTL=0x%08x)?\n",
+			 index, readl(hsotg->regs + epctrl_reg));
+
+	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n",
+		__func__, readl(hsotg->regs + epctrl_reg));
+}
+
+/**
+ * s3c_hsotg_map_dma - map the DMA memory being used for the request
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request is on.
+ * @req: The request being processed.
+ *
+ * We've been asked to queue a request, so ensure that the memory buffer
+ * is correctly setup for DMA. If we've been passed an extant DMA address
+ * then ensure the buffer has been synced to memory. If our buffer has no
+ * DMA memory, then we map the memory and mark our request to allow us to
+ * cleanup on completion.
+*/
+static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
+			     struct s3c_hsotg_ep *hs_ep,
+			     struct usb_request *req)
+{
+	enum dma_data_direction dir;
+	struct s3c_hsotg_req *hs_req = our_req(req);
+
+	dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	/* if the length is zero, ignore the DMA data */
+	if (hs_req->req.length == 0)
+		return 0;
+
+	if (req->dma == DMA_ADDR_INVALID) {
+		dma_addr_t dma;
+
+		dma = dma_map_single(hsotg->dev, req->buf, req->length, dir);
+
+		if (unlikely(dma_mapping_error(hsotg->dev, dma)))
+			goto dma_error;
+
+		if (dma & 3) {
+			dev_err(hsotg->dev, "%s: unaligned dma buffer\n",
+				__func__);
+
+			dma_unmap_single(hsotg->dev, dma, req->length, dir);
+			return -EINVAL;
+		}
+
+		hs_req->mapped = 1;
+		req->dma = dma;
+	} else {
+		dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
+		hs_req->mapped = 0;
+	}
+
+	return 0;
+
+dma_error:
+	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
+		__func__, req->buf, req->length);
+
+	return -EIO;
+}
+
+static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
+			      gfp_t gfp_flags)
+{
+	struct s3c_hsotg_req *hs_req = our_req(req);
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hs = hs_ep->parent;
+	unsigned long irqflags;
+	bool first;
+
+	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
+		ep->name, req, req->length, req->buf, req->no_interrupt,
+		req->zero, req->short_not_ok);
+
+	/* initialise status of the request */
+	INIT_LIST_HEAD(&hs_req->queue);
+	req->actual = 0;
+	req->status = -EINPROGRESS;
+
+	/* if we're using DMA, sync the buffers as necessary */
+	if (using_dma(hs)) {
+		int ret = s3c_hsotg_map_dma(hs, hs_ep, req);
+		if (ret)
+			return ret;
+	}
+
+	spin_lock_irqsave(&hs_ep->lock, irqflags);
+
+	first = list_empty(&hs_ep->queue);
+	list_add_tail(&hs_req->queue, &hs_ep->queue);
+
+	if (first)
+		s3c_hsotg_start_req(hs, hs_ep, hs_req, false);
+
+	spin_unlock_irqrestore(&hs_ep->lock, irqflags);
+
+	return 0;
+}
+
+static void s3c_hsotg_ep_free_request(struct usb_ep *ep,
+				      struct usb_request *req)
+{
+	struct s3c_hsotg_req *hs_req = our_req(req);
+
+	kfree(hs_req);
+}
+
+/**
+ * s3c_hsotg_complete_oursetup - setup completion callback
+ * @ep: The endpoint the request was on.
+ * @req: The request completed.
+ *
+ * Called on completion of any requests the driver itself
+ * submitted that need cleaning up.
+ */
+static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hsotg = hs_ep->parent;
+
+	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
+
+	s3c_hsotg_ep_free_request(ep, req);
+}
+
+/**
+ * ep_from_windex - convert control wIndex value to endpoint
+ * @hsotg: The driver state.
+ * @windex: The control request wIndex field (in host order).
+ *
+ * Convert the given wIndex into a pointer to a driver endpoint
+ * structure, or return NULL if it is not a valid endpoint.
+*/
+static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg,
+					   u32 windex)
+{
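+	/* wIndex carries the endpoint number in the low bits and
+	 * USB_DIR_IN in bit 7 */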
+	struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];
+	int dir = (windex & USB_DIR_IN) ? 1 : 0;
+	int idx = windex & 0x7F;
+
+	if (windex >= 0x100)
+		return NULL;
+
+	if (idx >= S3C_HSOTG_EPS)
+		return NULL;
+
+	if (idx && ep->dir_in != dir)
+		return NULL;
+
+	return ep;
+}
+
+/**
+ * s3c_hsotg_send_reply - send reply to control request
+ * @hsotg: The device state
+ * @ep: Endpoint 0
+ * @buff: Buffer for request
+ * @length: Length of reply.
+ *
+ * Create a request and queue it on the given endpoint. This is useful as
+ * an internal method of sending replies to certain control requests, etc.
+ */
+static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg,
+				struct s3c_hsotg_ep *ep,
+				void *buff,
+				int length)
+{
+	struct usb_request *req;
+	int ret;
+
+	dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
+
+	req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
+	hsotg->ep0_reply = req;
+	if (!req) {
+		dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
+		return -ENOMEM;
+	}
+
+	req->buf = hsotg->ep0_buff;
+	req->length = length;
+	req->zero = 1; /* always do zero-length final transfer */
+	req->complete = s3c_hsotg_complete_oursetup;
+
+	if (length)
+		memcpy(req->buf, buff, length);
+	else
+		ep->sent_zlp = 1;
+
+	ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
+	if (ret) {
+		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * s3c_hsotg_process_req_status - process request GET_STATUS
+ * @hsotg: The device state
+ * @ctrl: USB control request
+ */
+static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg,
+					struct usb_ctrlrequest *ctrl)
+{
+	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+	struct s3c_hsotg_ep *ep;
+	__le16 reply;
+	int ret;
+
+	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
+
+	if (!ep0->dir_in) {
+		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		reply = cpu_to_le16(0); /* bit 0 => self powered,
+					 * bit 1 => remote wakeup */
+		break;
+
+	case USB_RECIP_INTERFACE:
+		/* currently, the data result should be zero */
+		reply = cpu_to_le16(0);
+		break;
+
+	case USB_RECIP_ENDPOINT:
+		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
+		if (!ep)
+			return -ENOENT;
+
+		reply = cpu_to_le16(ep->halted ? 1 : 0);
+		break;
+
+	default:
+		return 0;
+	}
+
+	if (le16_to_cpu(ctrl->wLength) != 2)
+		return -EINVAL;
+
+	ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2);
+	if (ret) {
+		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
+		return ret;
+	}
+
+	return 1;
+}
+
+static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);
+
+/**
+ * get_ep_head - return the first request on the endpoint
+ * @hs_ep: The controller endpoint to get
+ *
+ * Get the first request on the endpoint.
+ */
+static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
+{
+	if (list_empty(&hs_ep->queue))
+		return NULL;
+
+	return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue);
+}
+
+/**
+ * s3c_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
+ * @hsotg: The device state
+ * @ctrl: USB control request
+ */
+static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
+					 struct usb_ctrlrequest *ctrl)
+{
+	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+	struct s3c_hsotg_req *hs_req;
+	bool restart;
+	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
+	struct s3c_hsotg_ep *ep;
+	int ret;
+
+	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
+		__func__, set ? "SET" : "CLEAR");
+
+	if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
+		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
+		if (!ep) {
+			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
+				__func__, le16_to_cpu(ctrl->wIndex));
+			return -ENOENT;
+		}
+
+		switch (le16_to_cpu(ctrl->wValue)) {
+		case USB_ENDPOINT_HALT:
+			s3c_hsotg_ep_sethalt(&ep->ep, set);
+
+			ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
+			if (ret) {
+				dev_err(hsotg->dev,
+					"%s: failed to send reply\n", __func__);
+				return ret;
+			}
+
+			if (!set) {
+				/*
+				 * If we have request in progress,
+				 * then complete it
+				 */
+				if (ep->req) {
+					hs_req = ep->req;
+					ep->req = NULL;
+					list_del_init(&hs_req->queue);
+					hs_req->req.complete(&ep->ep,
+							     &hs_req->req);
+				}
+
+				/* If we have pending request, then start it */
+				restart = !list_empty(&ep->queue);
+				if (restart) {
+					hs_req = get_ep_head(ep);
+					s3c_hsotg_start_req(hsotg, ep,
+							    hs_req, false);
+				}
+			}
+
+			break;
+
+		default:
+			return -ENOENT;
+		}
+	} else
+		return -ENOENT;  /* currently only deal with endpoint */
+
+	return 1;
+}
+
+/**
+ * s3c_hsotg_process_control - process a control request
+ * @hsotg: The device state
+ * @ctrl: The control request received
+ *
+ * The controller has received the SETUP phase of a control request, and
+ * needs to work out what to do next (and whether to pass it on to the
+ * gadget driver).
+ */
+static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
+				      struct usb_ctrlrequest *ctrl)
+{
+	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+	int ret = 0;
+	u32 dcfg;
+
+	ep0->sent_zlp = 0;
+
+	dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
+		 ctrl->bRequest, ctrl->bRequestType,
+		 ctrl->wValue, ctrl->wLength);
+
+	/* record the direction of the request, for later use when enqueuing
+	 * packets onto EP0. */
+
+	ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0;
+	dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in);
+
+	/* if we've no data with this request, then the last part of the
+	 * transaction is going to implicitly be IN. */
+	if (ctrl->wLength == 0)
+		ep0->dir_in = 1;
+
+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+		switch (ctrl->bRequest) {
+		case USB_REQ_SET_ADDRESS:
+			dcfg = readl(hsotg->regs + S3C_DCFG);
+			dcfg &= ~S3C_DCFG_DevAddr_MASK;
+			dcfg |= ctrl->wValue << S3C_DCFG_DevAddr_SHIFT;
+			writel(dcfg, hsotg->regs + S3C_DCFG);
+
+			dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
+
+			ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
+			return;
+
+		case USB_REQ_GET_STATUS:
+			ret = s3c_hsotg_process_req_status(hsotg, ctrl);
+			break;
+
+		case USB_REQ_CLEAR_FEATURE:
+		case USB_REQ_SET_FEATURE:
+			ret = s3c_hsotg_process_req_feature(hsotg, ctrl);
+			break;
+		}
+	}
+
+	/* as a fallback, try delivering it to the driver to deal with */
+
+	if (ret == 0 && hsotg->driver) {
+		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
+		if (ret < 0)
+			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
+	}
+
+	/* the request either cannot be handled, or is not formatted correctly,
+	 * so respond with a STALL for the status stage to indicate failure.
+	 */
+
+	if (ret < 0) {
+		u32 reg;
+		u32 ctrl;
+
+		dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
+		reg = (ep0->dir_in) ? S3C_DIEPCTL0 : S3C_DOEPCTL0;
+
+		/* S3C_DxEPCTL_Stall will be cleared by EP once it has
+		 * taken effect, so no need to clear later. */
+
+		ctrl = readl(hsotg->regs + reg);
+		ctrl |= S3C_DxEPCTL_Stall;
+		ctrl |= S3C_DxEPCTL_CNAK;
+		writel(ctrl, hsotg->regs + reg);
+
+		dev_dbg(hsotg->dev,
+			"written DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
+			ctrl, reg, readl(hsotg->regs + reg));
+
+		/* don't believe we need to do anything more to get the EP
+		 * to reply with a STALL packet */
+	}
+}
+
+static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
+
+/**
+ * s3c_hsotg_complete_setup - completion of a setup transfer
+ * @ep: The endpoint the request was on.
+ * @req: The request completed.
+ *
+ * Called on completion of any requests the driver itself submitted for
+ * EP0 setup packets
+ */
+static void s3c_hsotg_complete_setup(struct usb_ep *ep,
+				     struct usb_request *req)
+{
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hsotg = hs_ep->parent;
+
+	if (req->status < 0) {
+		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
+		return;
+	}
+
+	if (req->actual == 0)
+		s3c_hsotg_enqueue_setup(hsotg);
+	else
+		s3c_hsotg_process_control(hsotg, req->buf);
+}
+
+/**
+ * s3c_hsotg_enqueue_setup - start a request for EP0 packets
+ * @hsotg: The device state.
+ *
+ * Enqueue a request on EP0 if necessary to receive any SETUP packets
+ * from the host.
+ */
+static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)
+{
+	struct usb_request *req = hsotg->ctrl_req;
+	struct s3c_hsotg_req *hs_req = our_req(req);
+	int ret;
+
+	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
+
+	req->zero = 0;
+	req->length = 8;
+	req->buf = hsotg->ctrl_buff;
+	req->complete = s3c_hsotg_complete_setup;
+
+	if (!list_empty(&hs_req->queue)) {
+		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
+		return;
+	}
+
+	hsotg->eps[0].dir_in = 0;
+
+	ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);
+	if (ret < 0) {
+		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
+		/* Don't think there's much we can do other than watch the
+		 * driver fail. */
+	}
+}
+
+/**
+ * s3c_hsotg_complete_request - complete a request given to us
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request was on.
+ * @hs_req: The request to complete.
+ * @result: The result code (0 => Ok, otherwise errno)
+ *
+ * The given request has finished, so call the necessary completion
+ * if it has one and then look to see if we can start a new request
+ * on the endpoint.
+ *
+ * Note, expects the ep to already be locked as appropriate.
+*/
+static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
+				       struct s3c_hsotg_ep *hs_ep,
+				       struct s3c_hsotg_req *hs_req,
+				       int result)
+{
+	bool restart;
+
+	if (!hs_req) {
+		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
+		return;
+	}
+
+	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
+		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
+
+	/* only replace the status if we've not already set an error
+	 * from a previous transaction */
+
+	if (hs_req->req.status == -EINPROGRESS)
+		hs_req->req.status = result;
+
+	hs_ep->req = NULL;
+	list_del_init(&hs_req->queue);
+
+	if (using_dma(hsotg))
+		s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
+
+	/* call the complete request with the locks off, just in case the
+	 * request tries to queue more work for this endpoint. */
+
+	if (hs_req->req.complete) {
+		spin_unlock(&hs_ep->lock);
+		hs_req->req.complete(&hs_ep->ep, &hs_req->req);
+		spin_lock(&hs_ep->lock);
+	}
+
+	/* Look to see if there is anything else to do. Note, the completion
+	 * of the previous request may have caused a new request to be started
+	 * so be careful when doing this. */
+
+	if (!hs_ep->req && result >= 0) {
+		restart = !list_empty(&hs_ep->queue);
+		if (restart) {
+			hs_req = get_ep_head(hs_ep);
+			s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
+		}
+	}
+}
+
+/**
+ * s3c_hsotg_complete_request_lock - complete a request given to us (locked)
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request was on.
+ * @hs_req: The request to complete.
+ * @result: The result code (0 => Ok, otherwise errno)
+ *
+ * See s3c_hsotg_complete_request(); this variant acquires the
+ * endpoint's lock itself before completing the request.
+*/
+static void s3c_hsotg_complete_request_lock(struct s3c_hsotg *hsotg,
+					    struct s3c_hsotg_ep *hs_ep,
+					    struct s3c_hsotg_req *hs_req,
+					    int result)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hs_ep->lock, flags);
+	s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
+	spin_unlock_irqrestore(&hs_ep->lock, flags);
+}
+
+/**
+ * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
+ * @hsotg: The device state.
+ * @ep_idx: The endpoint index for the data
+ * @size: The size of data in the fifo, in bytes
+ *
+ * The FIFO status shows there is data to read from the FIFO for a given
+ * endpoint, so sort out whether we need to read the data into a request
+ * that has been made for that endpoint.
+ */
+static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
+{
+	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
+	struct s3c_hsotg_req *hs_req = hs_ep->req;
+	void __iomem *fifo = hsotg->regs + S3C_EPFIFO(ep_idx);
+	int to_read;
+	int max_req;
+	int read_ptr;
+
+	if (!hs_req) {
+		u32 epctl = readl(hsotg->regs + S3C_DOEPCTL(ep_idx));
+		int ptr;
+
+		dev_warn(hsotg->dev,
+			 "%s: FIFO %d bytes on ep%d but no req (DxEPCTl=0x%08x)\n",
+			 __func__, size, ep_idx, epctl);
+
+		/* dump the data from the FIFO, we've nothing we can do */
+		for (ptr = 0; ptr < size; ptr += 4)
+			(void)readl(fifo);
+
+		return;
+	}
+
+	spin_lock(&hs_ep->lock);
+
+	to_read = size;
+	read_ptr = hs_req->req.actual;
+	max_req = hs_req->req.length - read_ptr;
+
+	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
+		__func__, to_read, max_req, read_ptr, hs_req->req.length);
+
+	if (to_read > max_req) {
+		/* more data appeared than we were willing
+		 * to deal with in this request.
+		 */
+
+		/* currently we don't deal with this */
+		WARN_ON_ONCE(1);
+	}
+
+	hs_ep->total_data += to_read;
+	hs_req->req.actual += to_read;
+	to_read = DIV_ROUND_UP(to_read, 4);
+
+	/* note, we might over-write the buffer end by 3 bytes depending on
+	 * alignment of the data. */
+	readsl(fifo, hs_req->req.buf + read_ptr, to_read);
+
+	spin_unlock(&hs_ep->lock);
+}
+
+/**
+ * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
+ * @hsotg: The device instance
+ * @req: The request currently on this endpoint
+ *
+ * Generate a zero-length IN packet request for terminating a SETUP
+ * transaction.
+ *
+ * Note, since we don't write any data to the TxFIFO, then it is
+ * currently believed that we do not need to wait for any space in
+ * the TxFIFO.
+ */
+static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
+			       struct s3c_hsotg_req *req)
+{
+	u32 ctrl;
+
+	if (!req) {
+		dev_warn(hsotg->dev, "%s: no request?\n", __func__);
+		return;
+	}
+
+	if (req->req.length == 0) {
+		hsotg->eps[0].sent_zlp = 1;
+		s3c_hsotg_enqueue_setup(hsotg);
+		return;
+	}
+
+	hsotg->eps[0].dir_in = 1;
+	hsotg->eps[0].sent_zlp = 1;
+
+	dev_dbg(hsotg->dev, "sending zero-length packet\n");
+
+	/* issue a zero-sized packet to terminate this */
+	writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
+	       S3C_DxEPTSIZ_XferSize(0), hsotg->regs + S3C_DIEPTSIZ(0));
+
+	ctrl = readl(hsotg->regs + S3C_DIEPCTL0);
+	ctrl |= S3C_DxEPCTL_CNAK;  /* clear NAK set by core */
+	ctrl |= S3C_DxEPCTL_EPEna; /* ensure ep enabled */
+	ctrl |= S3C_DxEPCTL_USBActEp;
+	writel(ctrl, hsotg->regs + S3C_DIEPCTL0);
+}
+
+/**
+ * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
+ * @hsotg: The device instance
+ * @epnum: The endpoint received from
+ * @was_setup: Set if processing a SetupDone event.
+ *
+ * The RXFIFO has delivered an OutDone event, which means that the data
+ * transfer for an OUT endpoint has been completed, either by a short
+ * packet or by the finish of a transfer.
+*/
+static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
+				     int epnum, bool was_setup)
+{
+	u32 epsize = readl(hsotg->regs + S3C_DOEPTSIZ(epnum));
+	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
+	struct s3c_hsotg_req *hs_req = hs_ep->req;
+	struct usb_request *req = &hs_req->req;
+	unsigned size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
+	int result = 0;
+
+	if (!hs_req) {
+		dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
+		return;
+	}
+
+	if (using_dma(hsotg)) {
+		unsigned size_done;
+
+		/* Calculate the size of the transfer by checking how much
+		 * is left in the endpoint size register and then working it
+		 * out from the amount we loaded for the transfer.
+		 *
+		 * We need to do this as DMA pointers are always 32bit aligned
+		 * so may overshoot/undershoot the transfer.
+		 */
+
+		size_done = hs_ep->size_loaded - size_left;
+		size_done += hs_ep->last_load;
+
+		req->actual = size_done;
+	}
+
+	/* if there is more of the request to do, schedule a new transfer */
+	if (req->actual < req->length && size_left == 0) {
+		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
+		return;
+	}
+
+	if (req->actual < req->length && req->short_not_ok) {
+		dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
+			__func__, req->actual, req->length);
+
+		/* todo - what should we return here? there's no one else
+		 * even bothering to check the status. */
+	}
+
+	if (epnum == 0) {
+		if (!was_setup && req->complete != s3c_hsotg_complete_setup)
+			s3c_hsotg_send_zlp(hsotg, hs_req);
+	}
+
+	s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, result);
+}
+
+/**
+ * s3c_hsotg_read_frameno - read current frame number
+ * @hsotg: The device instance
+ *
+ * Return the current frame number
+*/
+static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
+{
+	u32 dsts;
+
+	dsts = readl(hsotg->regs + S3C_DSTS);
+	dsts &= S3C_DSTS_SOFFN_MASK;
+	dsts >>= S3C_DSTS_SOFFN_SHIFT;
+
+	return dsts;
+}
+
+/**
+ * s3c_hsotg_handle_rx - RX FIFO has data
+ * @hsotg: The device instance
+ *
+ * The IRQ handler has detected that the RX FIFO has some data in it
+ * that requires processing, so find out what is in there and do the
+ * appropriate read.
+ *
+ * The RXFIFO is a true FIFO, the packets coming out are still in packet
+ * chunks, so if you have x packets received on an endpoint you'll get x
+ * FIFO events delivered, each with a packet's worth of data in it.
+ *
+ * When using DMA, we should not be processing events from the RXFIFO
+ * as the actual data should be sent to the memory directly and we turn
+ * on the completion interrupts to get notifications of transfer completion.
+ */
+static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
+{
+	u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP);
+	u32 epnum, status, size;
+
+	WARN_ON(using_dma(hsotg));
+
+	epnum = grxstsr & S3C_GRXSTS_EPNum_MASK;
+	status = grxstsr & S3C_GRXSTS_PktSts_MASK;
+
+	size = grxstsr & S3C_GRXSTS_ByteCnt_MASK;
+	size >>= S3C_GRXSTS_ByteCnt_SHIFT;
+
+	if (1)
+		dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
+			__func__, grxstsr, size, epnum);
+
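+/* shift the S3C_GRXSTS_PktSts_* values down so they can be compared
+ * against 'status >> S3C_GRXSTS_PktSts_SHIFT' in the switch below */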
+#define __status(x) ((x) >> S3C_GRXSTS_PktSts_SHIFT)
+
+	switch (status >> S3C_GRXSTS_PktSts_SHIFT) {
+	case __status(S3C_GRXSTS_PktSts_GlobalOutNAK):
+		dev_dbg(hsotg->dev, "GlobalOutNAK\n");
+		break;
+
+	case __status(S3C_GRXSTS_PktSts_OutDone):
+		dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
+			s3c_hsotg_read_frameno(hsotg));
+
+		if (!using_dma(hsotg))
+			s3c_hsotg_handle_outdone(hsotg, epnum, false);
+		break;
+
+	case __status(S3C_GRXSTS_PktSts_SetupDone):
+		dev_dbg(hsotg->dev,
+			"SetupDone (Frame=0x%08x, DOEPCTL=0x%08x)\n",
+			s3c_hsotg_read_frameno(hsotg),
+			readl(hsotg->regs + S3C_DOEPCTL(0)));
+
+		s3c_hsotg_handle_outdone(hsotg, epnum, true);
+		break;
+
+	case __status(S3C_GRXSTS_PktSts_OutRX):
+		s3c_hsotg_rx_data(hsotg, epnum, size);
+		break;
+
+	case __status(S3C_GRXSTS_PktSts_SetupRX):
+		dev_dbg(hsotg->dev,
+			"SetupRX (Frame=0x%08x, DOEPCTL=0x%08x)\n",
+			s3c_hsotg_read_frameno(hsotg),
+			readl(hsotg->regs + S3C_DOEPCTL(0)));
+
+		s3c_hsotg_rx_data(hsotg, epnum, size);
+		break;
+
+	default:
+		dev_warn(hsotg->dev, "%s: unknown status %08x\n",
+			 __func__, grxstsr);
+
+		s3c_hsotg_dump(hsotg);
+		break;
+	}
+}
+
+/**
+ * s3c_hsotg_ep0_mps - turn max packet size into register setting
+ * @mps: The maximum packet size in bytes.
+*/
+static u32 s3c_hsotg_ep0_mps(unsigned int mps)
+{
+	switch (mps) {
+	case 64:
+		return S3C_D0EPCTL_MPS_64;
+	case 32:
+		return S3C_D0EPCTL_MPS_32;
+	case 16:
+		return S3C_D0EPCTL_MPS_16;
+	case 8:
+		return S3C_D0EPCTL_MPS_8;
+	}
+
+	/* bad max packet size, warn and return invalid result */
+	WARN_ON(1);
+	return (u32)-1;
+}
+
+/**
+ * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field
+ * @hsotg: The driver state.
+ * @ep: The index number of the endpoint
+ * @mps: The maximum packet size in bytes
+ *
+ * Configure the maximum packet size for the given endpoint, updating
+ * the hardware control registers to reflect this.
+ */
+static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
+				       unsigned int ep, unsigned int mps)
+{
+	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
+	void __iomem *regs = hsotg->regs;
+	u32 mpsval;
+	u32 reg;
+
+	if (ep == 0) {
+		/* EP0 is a special case */
+		mpsval = s3c_hsotg_ep0_mps(mps);
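+		/* s3c_hsotg_ep0_mps() returns (u32)-1 for an unsupported
+		 * size; valid EP0 MPS encodings fit in the range 0..3 */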
+		if (mpsval > 3)
+			goto bad_mps;
+	} else {
+		if (mps >= S3C_DxEPCTL_MPS_LIMIT+1)
+			goto bad_mps;
+
+		mpsval = mps;
+	}
+
+	hs_ep->ep.maxpacket = mps;
+
+	/* update both the in and out endpoint control registers, even
+	 * if one of the directions may not be in use. */
+
+	reg = readl(regs + S3C_DIEPCTL(ep));
+	reg &= ~S3C_DxEPCTL_MPS_MASK;
+	reg |= mpsval;
+	writel(reg, regs + S3C_DIEPCTL(ep));
+
+	if (ep) {
+		reg = readl(regs + S3C_DOEPCTL(ep));
+		reg &= ~S3C_DxEPCTL_MPS_MASK;
+		reg |= mpsval;
+		writel(reg, regs + S3C_DOEPCTL(ep));
+	}
+
+	return;
+
+bad_mps:
+	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
+}
+
+/**
+ * s3c_hsotg_txfifo_flush - flush Tx FIFO
+ * @hsotg: The driver state
+ * @idx: The index for the endpoint (0..15)
+ */
+static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx)
+{
+	int timeout;
+	int val;
+
+	writel(S3C_GRSTCTL_TxFNum(idx) | S3C_GRSTCTL_TxFFlsh,
+		hsotg->regs + S3C_GRSTCTL);
+
+	/* wait until the fifo is flushed */
+	timeout = 100;
+
+	while (1) {
+		val = readl(hsotg->regs + S3C_GRSTCTL);
+
+		if ((val & (S3C_GRSTCTL_TxFFlsh)) == 0)
+			break;
+
+		if (--timeout == 0) {
+			dev_err(hsotg->dev,
+				"%s: timeout flushing fifo (GRSTCTL=%08x)\n",
+				__func__, val);
+			break;
+		}
+
+		udelay(1);
+	}
+}
+
+/**
+ * s3c_hsotg_trytx - check to see if anything needs transmitting
+ * @hsotg: The driver state
+ * @hs_ep: The driver endpoint to check.
+ *
+ * Check to see if there is a request that has data to send, and if so
+ * make an attempt to write data into the FIFO.
+ */
+static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
+			   struct s3c_hsotg_ep *hs_ep)
+{
+	struct s3c_hsotg_req *hs_req = hs_ep->req;
+
+	if (!hs_ep->dir_in || !hs_req)
+		return 0;
+
+	if (hs_req->req.actual < hs_req->req.length) {
+		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
+			hs_ep->index);
+		return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
+	}
+
+	return 0;
+}
+
+/**
+ * s3c_hsotg_complete_in - complete IN transfer
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint that has just completed.
+ *
+ * An IN transfer has been completed, update the transfer's state and then
+ * call the relevant completion routines.
+ */
+static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
+				  struct s3c_hsotg_ep *hs_ep)
+{
+	struct s3c_hsotg_req *hs_req = hs_ep->req;
+	u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
+	int size_left, size_done;
+
+	if (!hs_req) {
+		dev_dbg(hsotg->dev, "XferCompl but no req\n");
+		return;
+	}
+
+	/* Calculate the size of the transfer by checking how much is left
+	 * in the endpoint size register and then working it out from
+	 * the amount we loaded for the transfer.
+	 *
+	 * We do this even for DMA, as the transfer may have incremented
+	 * past the end of the buffer (DMA transfers are always 32bit
+	 * aligned).
+	 */
+
+	size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
+
+	size_done = hs_ep->size_loaded - size_left;
+	size_done += hs_ep->last_load;
+
+	if (hs_req->req.actual != size_done)
+		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
+			__func__, hs_req->req.actual, size_done);
+
+	hs_req->req.actual = size_done;
+
+	/* if we did all of the transfer, and there is more data left
+	 * around, then try restarting the rest of the request */
+
+	if (!size_left && hs_req->req.actual < hs_req->req.length) {
+		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
+		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
+	} else
+		s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, 0);
+}
+
+/**
+ * s3c_hsotg_epint - handle an in/out endpoint interrupt
+ * @hsotg: The driver state
+ * @idx: The index for the endpoint (0..15)
+ * @dir_in: Set if this is an IN endpoint
+ *
+ * Process and clear any interrupt pending for an individual endpoint
+*/
+static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
+			    int dir_in)
+{
+	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
+	u32 epint_reg = dir_in ? S3C_DIEPINT(idx) : S3C_DOEPINT(idx);
+	u32 epctl_reg = dir_in ? S3C_DIEPCTL(idx) : S3C_DOEPCTL(idx);
+	u32 epsiz_reg = dir_in ? S3C_DIEPTSIZ(idx) : S3C_DOEPTSIZ(idx);
+	u32 ints;
+
+	ints = readl(hsotg->regs + epint_reg);
+
+	/* Clear endpoint interrupts */
+	writel(ints, hsotg->regs + epint_reg);
+
+	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
+		__func__, idx, dir_in ? "in" : "out", ints);
+
+	if (ints & S3C_DxEPINT_XferCompl) {
+		dev_dbg(hsotg->dev,
+			"%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",
+			__func__, readl(hsotg->regs + epctl_reg),
+			readl(hsotg->regs + epsiz_reg));
+
+		/* we get OutDone from the FIFO, so we only need to look
+		 * at completing IN requests here */
+		if (dir_in) {
+			s3c_hsotg_complete_in(hsotg, hs_ep);
+
+			if (idx == 0 && !hs_ep->req)
+				s3c_hsotg_enqueue_setup(hsotg);
+		} else if (using_dma(hsotg)) {
+			/* We're using DMA, so we need to fire an OutDone
+			 * here, as we ignore the RXFIFO. */
+
+			s3c_hsotg_handle_outdone(hsotg, idx, false);
+		}
+	}
+
+	if (ints & S3C_DxEPINT_EPDisbld) {
+		dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
+
+		if (dir_in) {
+			int epctl = readl(hsotg->regs + epctl_reg);
+
+			s3c_hsotg_txfifo_flush(hsotg, idx);
+
+			if ((epctl & S3C_DxEPCTL_Stall) &&
+				(epctl & S3C_DxEPCTL_EPType_Bulk)) {
+				int dctl = readl(hsotg->regs + S3C_DCTL);
+
+				dctl |= S3C_DCTL_CGNPInNAK;
+				writel(dctl, hsotg->regs + S3C_DCTL);
+			}
+		}
+	}
+
+	if (ints & S3C_DxEPINT_AHBErr)
+		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
+
+	if (ints & S3C_DxEPINT_Setup) {  /* Setup or Timeout */
+		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n",  __func__);
+
+		if (using_dma(hsotg) && idx == 0) {
+			/* this is the notification that we've received a
+			 * setup packet. In non-DMA mode we'd get this
+			 * from the RXFIFO; here we need to process the
+			 * setup directly. */
+
+			if (dir_in)
+				WARN_ON_ONCE(1);
+			else
+				s3c_hsotg_handle_outdone(hsotg, 0, true);
+		}
+	}
+
+	if (ints & S3C_DxEPINT_Back2BackSetup)
+		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
+
+	if (dir_in) {
+		/* not sure if this is important, but we'll clear it anyway */
+		if (ints & S3C_DIEPMSK_INTknTXFEmpMsk) {
+			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
+				__func__, idx);
+		}
+
+		/* this probably means something bad is happening */
+		if (ints & S3C_DIEPMSK_INTknEPMisMsk) {
+			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
+				 __func__, idx);
+		}
+
+		/* FIFO has space or is empty (see GAHBCFG) */
+		if (hsotg->dedicated_fifos &&
+		    ints & S3C_DIEPMSK_TxFIFOEmpty) {
+			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
+				__func__, idx);
+			if (!using_dma(hsotg))
+				s3c_hsotg_trytx(hsotg, hs_ep);
+		}
+	}
+}
+
+/**
+ * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
+ * @hsotg: The device state.
+ *
+ * Handle updating the device settings after the enumeration phase has
+ * been completed.
+*/
+static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
+{
+	u32 dsts = readl(hsotg->regs + S3C_DSTS);
+	int ep0_mps = 0, ep_mps;
+
+	/* This should signal the finish of the enumeration phase
+	 * of the USB handshaking, so we should now know what rate
+	 * we connected at. */
+
+	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
+
+	/* note, since we're limited by the size of transfer on EP0, and
+	 * it seems IN transfers must be an even number of packets, we do
+	 * not advertise a 64-byte MPS on EP0. */
+
+	/* catch both EnumSpd_FS and EnumSpd_FS48 */
+	switch (dsts & S3C_DSTS_EnumSpd_MASK) {
+	case S3C_DSTS_EnumSpd_FS:
+	case S3C_DSTS_EnumSpd_FS48:
+		hsotg->gadget.speed = USB_SPEED_FULL;
+		ep0_mps = EP0_MPS_LIMIT;
+		ep_mps = 64;
+		break;
+
+	case S3C_DSTS_EnumSpd_HS:
+		hsotg->gadget.speed = USB_SPEED_HIGH;
+		ep0_mps = EP0_MPS_LIMIT;
+		ep_mps = 512;
+		break;
+
+	case S3C_DSTS_EnumSpd_LS:
+		hsotg->gadget.speed = USB_SPEED_LOW;
+		/* note, we don't actually support LS in this driver at the
+		 * moment, and the documentation seems to imply that it isn't
+		 * supported by the PHYs on some of the devices.
+		 */
+		break;
+	}
+	dev_info(hsotg->dev, "new device is %s\n",
+		 usb_speed_string(hsotg->gadget.speed));
+
+	/* we should now know the maximum packet size for an
+	 * endpoint, so set the endpoints to a default value. */
+
+	if (ep0_mps) {
+		int i;
+		s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
+		for (i = 1; i < S3C_HSOTG_EPS; i++)
+			s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
+	}
+
+	/* ensure after enumeration our EP0 is active */
+
+	s3c_hsotg_enqueue_setup(hsotg);
+
+	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+		readl(hsotg->regs + S3C_DIEPCTL0),
+		readl(hsotg->regs + S3C_DOEPCTL0));
+}
+
+/**
+ * kill_all_requests - remove all requests from the endpoint's queue
+ * @hsotg: The device state.
+ * @ep: The endpoint the requests may be on.
+ * @result: The result code to use.
+ * @force: Force removal of any current requests
+ *
+ * Go through the requests on the given endpoint and mark them
+ * completed with the given result code.
+ */
+static void kill_all_requests(struct s3c_hsotg *hsotg,
+			      struct s3c_hsotg_ep *ep,
+			      int result, bool force)
+{
+	struct s3c_hsotg_req *req, *treq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ep->lock, flags);
+
+	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
+		/* currently, we can't do much about an already
+		 * running request on an in endpoint */
+
+		if (ep->req == req && ep->dir_in && !force)
+			continue;
+
+		s3c_hsotg_complete_request(hsotg, ep, req,
+					   result);
+	}
+
+	spin_unlock_irqrestore(&ep->lock, flags);
+}
+
+#define call_gadget(_hs, _entry) \
+	do { \
+		if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN &&	\
+		    (_hs)->driver && (_hs)->driver->_entry)	\
+			(_hs)->driver->_entry(&(_hs)->gadget);	\
+	} while (0)
+
+/**
+ * s3c_hsotg_disconnect_irq - disconnect irq service
+ * @hsotg: The device state.
+ *
+ * A disconnect IRQ has been received, meaning that the host has
+ * lost contact with the bus. Remove all current transactions
+ * and signal the gadget driver that this has happened.
+*/
+static void s3c_hsotg_disconnect_irq(struct s3c_hsotg *hsotg)
+{
+	unsigned ep;
+
+	for (ep = 0; ep < S3C_HSOTG_EPS; ep++)
+		kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);
+
+	call_gadget(hsotg, disconnect);
+}
+
+/**
+ * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
+ * @hsotg: The device state.
+ * @periodic: True if this is a periodic FIFO interrupt
+ */
+static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
+{
+	struct s3c_hsotg_ep *ep;
+	int epno, ret;
+
+	/* look through for any more data to transmit */
+
+	for (epno = 0; epno < S3C_HSOTG_EPS; epno++) {
+		ep = &hsotg->eps[epno];
+
+		if (!ep->dir_in)
+			continue;
+
+		if ((periodic && !ep->periodic) ||
+		    (!periodic && ep->periodic))
+			continue;
+
+		ret = s3c_hsotg_trytx(hsotg, ep);
+		if (ret < 0)
+			break;
+	}
+}
+
+static struct s3c_hsotg *our_hsotg;
+
+/* IRQ flags which will trigger a retry around the IRQ loop */
+#define IRQ_RETRY_MASK (S3C_GINTSTS_NPTxFEmp | \
+			S3C_GINTSTS_PTxFEmp |  \
+			S3C_GINTSTS_RxFLvl)
+
+/**
+ * s3c_hsotg_irq - handle device interrupt
+ * @irq: The IRQ number triggered
+ * @pw: The pw value supplied when the handler was registered.
+ */
+static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
+{
+	struct s3c_hsotg *hsotg = pw;
+	int retry_count = 8;
+	u32 gintsts;
+	u32 gintmsk;
+
+irq_retry:
+	gintsts = readl(hsotg->regs + S3C_GINTSTS);
+	gintmsk = readl(hsotg->regs + S3C_GINTMSK);
+
+	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
+		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
+
+	gintsts &= gintmsk;
+
+	if (gintsts & S3C_GINTSTS_OTGInt) {
+		u32 otgint = readl(hsotg->regs + S3C_GOTGINT);
+
+		dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);
+
+		writel(otgint, hsotg->regs + S3C_GOTGINT);
+	}
+
+	if (gintsts & S3C_GINTSTS_DisconnInt) {
+		dev_dbg(hsotg->dev, "%s: DisconnInt\n", __func__);
+		writel(S3C_GINTSTS_DisconnInt, hsotg->regs + S3C_GINTSTS);
+
+		s3c_hsotg_disconnect_irq(hsotg);
+	}
+
+	if (gintsts & S3C_GINTSTS_SessReqInt) {
+		dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
+		writel(S3C_GINTSTS_SessReqInt, hsotg->regs + S3C_GINTSTS);
+	}
+
+	if (gintsts & S3C_GINTSTS_EnumDone) {
+		writel(S3C_GINTSTS_EnumDone, hsotg->regs + S3C_GINTSTS);
+
+		s3c_hsotg_irq_enumdone(hsotg);
+	}
+
+	if (gintsts & S3C_GINTSTS_ConIDStsChng) {
+		dev_dbg(hsotg->dev, "ConIDStsChg (DSTS=0x%08x, GOTCTL=%08x)\n",
+			readl(hsotg->regs + S3C_DSTS),
+			readl(hsotg->regs + S3C_GOTGCTL));
+
+		writel(S3C_GINTSTS_ConIDStsChng, hsotg->regs + S3C_GINTSTS);
+	}
+
+	if (gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt)) {
+		u32 daint = readl(hsotg->regs + S3C_DAINT);
+		u32 daint_out = daint >> S3C_DAINT_OutEP_SHIFT;
+		u32 daint_in = daint & ~(daint_out << S3C_DAINT_OutEP_SHIFT);
+		int ep;
+
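+		/* DAINT packs the per-endpoint interrupt status: the IN
+		 * endpoints sit in the low half and the OUT endpoints in
+		 * the upper half, hence the shift/mask split above. */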
+		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
+
+		for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
+			if (daint_out & 1)
+				s3c_hsotg_epint(hsotg, ep, 0);
+		}
+
+		for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
+			if (daint_in & 1)
+				s3c_hsotg_epint(hsotg, ep, 1);
+		}
+	}
+
+	if (gintsts & S3C_GINTSTS_USBRst) {
+		dev_info(hsotg->dev, "%s: USBRst\n", __func__);
+		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
+			readl(hsotg->regs + S3C_GNPTXSTS));
+
+		writel(S3C_GINTSTS_USBRst, hsotg->regs + S3C_GINTSTS);
+
+		kill_all_requests(hsotg, &hsotg->eps[0], -ECONNRESET, true);
+
+		/* it seems after a reset we can end up with a situation
+		 * where the TXFIFO still has data in it... the docs
+		 * suggest resetting all the fifos, so use the init_fifo
+		 * code to relayout and flush the fifos.
+		 */
+
+		s3c_hsotg_init_fifo(hsotg);
+
+		s3c_hsotg_enqueue_setup(hsotg);
+	}
+
+	/* check both FIFOs */
+
+	if (gintsts & S3C_GINTSTS_NPTxFEmp) {
+		dev_dbg(hsotg->dev, "NPTxFEmp\n");
+
+		/* Disable the interrupt to stop it happening again
+		 * unless one of these endpoint routines decides that
+		 * it needs re-enabling */
+
+		s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
+		s3c_hsotg_irq_fifoempty(hsotg, false);
+	}
+
+	if (gintsts & S3C_GINTSTS_PTxFEmp) {
+		dev_dbg(hsotg->dev, "PTxFEmp\n");
+
+		/* See note in S3C_GINTSTS_NPTxFEmp */
+
+		s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
+		s3c_hsotg_irq_fifoempty(hsotg, true);
+	}
+
+	if (gintsts & S3C_GINTSTS_RxFLvl) {
+		/* note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
+		 * we need to retry s3c_hsotg_handle_rx if this is still
+		 * set. */
+
+		s3c_hsotg_handle_rx(hsotg);
+	}
+
+	if (gintsts & S3C_GINTSTS_ModeMis) {
+		dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
+		writel(S3C_GINTSTS_ModeMis, hsotg->regs + S3C_GINTSTS);
+	}
+
+	if (gintsts & S3C_GINTSTS_USBSusp) {
+		dev_info(hsotg->dev, "S3C_GINTSTS_USBSusp\n");
+		writel(S3C_GINTSTS_USBSusp, hsotg->regs + S3C_GINTSTS);
+
+		call_gadget(hsotg, suspend);
+	}
+
+	if (gintsts & S3C_GINTSTS_WkUpInt) {
+		dev_info(hsotg->dev, "S3C_GINTSTS_WkUpIn\n");
+		writel(S3C_GINTSTS_WkUpInt, hsotg->regs + S3C_GINTSTS);
+
+		call_gadget(hsotg, resume);
+	}
+
+	if (gintsts & S3C_GINTSTS_ErlySusp) {
+		dev_dbg(hsotg->dev, "S3C_GINTSTS_ErlySusp\n");
+		writel(S3C_GINTSTS_ErlySusp, hsotg->regs + S3C_GINTSTS);
+	}
+
+	/* these next two seem to crop-up occasionally causing the core
+	 * to shutdown the USB transfer, so try clearing them and logging
+	 * the occurrence. */
+
+	if (gintsts & S3C_GINTSTS_GOUTNakEff) {
+		dev_info(hsotg->dev, "GOUTNakEff triggered\n");
+
+		writel(S3C_DCTL_CGOUTNak, hsotg->regs + S3C_DCTL);
+
+		s3c_hsotg_dump(hsotg);
+	}
+
+	if (gintsts & S3C_GINTSTS_GINNakEff) {
+		dev_info(hsotg->dev, "GINNakEff triggered\n");
+
+		writel(S3C_DCTL_CGNPInNAK, hsotg->regs + S3C_DCTL);
+
+		s3c_hsotg_dump(hsotg);
+	}
+
+	/* if we've had fifo events, we should try and go around the
+	 * loop again to see if there's any point in returning yet. */
+
+	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
+		goto irq_retry;
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * s3c_hsotg_ep_enable - enable the given endpoint
+ * @ep: The USB endpoint to configure
+ * @desc: The USB endpoint descriptor to configure with.
+ *
+ * This is called from the USB gadget code's usb_ep_enable().
+*/
+static int s3c_hsotg_ep_enable(struct usb_ep *ep,
+			       const struct usb_endpoint_descriptor *desc)
+{
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hsotg = hs_ep->parent;
+	unsigned long flags;
+	int index = hs_ep->index;
+	u32 epctrl_reg;
+	u32 epctrl;
+	u32 mps;
+	int dir_in;
+	int ret = 0;
+
+	dev_dbg(hsotg->dev,
+		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
+		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
+		desc->wMaxPacketSize, desc->bInterval);
+
+	/* not to be called for EP0 */
+	WARN_ON(index == 0);
+
+	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
+	if (dir_in != hs_ep->dir_in) {
+		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
+		return -EINVAL;
+	}
+
+	mps = usb_endpoint_maxp(desc);
+
+	/* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */
+
+	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
+	epctrl = readl(hsotg->regs + epctrl_reg);
+
+	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
+		__func__, epctrl, epctrl_reg);
+
+	spin_lock_irqsave(&hs_ep->lock, flags);
+
+	epctrl &= ~(S3C_DxEPCTL_EPType_MASK | S3C_DxEPCTL_MPS_MASK);
+	epctrl |= S3C_DxEPCTL_MPS(mps);
+
+	/* mark the endpoint as active, otherwise the core may ignore
+	 * transactions entirely for this endpoint */
+	epctrl |= S3C_DxEPCTL_USBActEp;
+
+	/* set the NAK status on the endpoint, otherwise we might try and
+	 * do something with data for which we've not yet got a request to
+	 * process, since the RXFIFO will take data for an endpoint even if
+	 * the size register hasn't been set.
+	 */
+
+	epctrl |= S3C_DxEPCTL_SNAK;
+
+	/* update the endpoint state */
+	hs_ep->ep.maxpacket = mps;
+
+	/* default, set to non-periodic */
+	hs_ep->periodic = 0;
+
+	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_ISOC:
+		dev_err(hsotg->dev, "no current ISOC support\n");
+		ret = -EINVAL;
+		goto out;
+
+	case USB_ENDPOINT_XFER_BULK:
+		epctrl |= S3C_DxEPCTL_EPType_Bulk;
+		break;
+
+	case USB_ENDPOINT_XFER_INT:
+		if (dir_in) {
+			/* Allocate our TxFNum by simply using the index
+			 * of the endpoint for the moment. We could do
+			 * something better if the host indicates how
+			 * many FIFOs we are expecting to use. */
+
+			hs_ep->periodic = 1;
+			epctrl |= S3C_DxEPCTL_TxFNum(index);
+		}
+
+		epctrl |= S3C_DxEPCTL_EPType_Intterupt;
+		break;
+
+	case USB_ENDPOINT_XFER_CONTROL:
+		epctrl |= S3C_DxEPCTL_EPType_Control;
+		break;
+	}
+
+	/* if the hardware has dedicated fifos, we must give each IN EP
+	 * a unique tx-fifo even if it is non-periodic.
+	 */
+	if (dir_in && hsotg->dedicated_fifos)
+		epctrl |= S3C_DxEPCTL_TxFNum(index);
+
+	/* for non control endpoints, set PID to D0 */
+	if (index)
+		epctrl |= S3C_DxEPCTL_SetD0PID;
+
+	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
+		__func__, epctrl);
+
+	writel(epctrl, hsotg->regs + epctrl_reg);
+	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
+		__func__, readl(hsotg->regs + epctrl_reg));
+
+	/* enable the endpoint interrupt */
+	s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
+
+out:
+	spin_unlock_irqrestore(&hs_ep->lock, flags);
+	return ret;
+}
+
+static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+{
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hsotg = hs_ep->parent;
+	int dir_in = hs_ep->dir_in;
+	int index = hs_ep->index;
+	unsigned long flags;
+	u32 epctrl_reg;
+	u32 ctrl;
+
+	dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);
+
+	if (ep == &hsotg->eps[0].ep) {
+		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
+		return -EINVAL;
+	}
+
+	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
+
+	/* terminate all requests with shutdown */
+	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
+
+	spin_lock_irqsave(&hs_ep->lock, flags);
+
+	ctrl = readl(hsotg->regs + epctrl_reg);
+	ctrl &= ~S3C_DxEPCTL_EPEna;
+	ctrl &= ~S3C_DxEPCTL_USBActEp;
+	ctrl |= S3C_DxEPCTL_SNAK;
+
+	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
+	writel(ctrl, hsotg->regs + epctrl_reg);
+
+	/* disable endpoint interrupts */
+	s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
+
+	spin_unlock_irqrestore(&hs_ep->lock, flags);
+	return 0;
+}
+
+/**
+ * on_list - check request is on the given endpoint
+ * @ep: The endpoint to check.
+ * @test: The request to test if it is on the endpoint.
+*/
+static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
+{
+	struct s3c_hsotg_req *req, *treq;
+
+	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
+		if (req == test)
+			return true;
+	}
+
+	return false;
+}
+
+static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+	struct s3c_hsotg_req *hs_req = our_req(req);
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hs = hs_ep->parent;
+	unsigned long flags;
+
+	dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
+
+	spin_lock_irqsave(&hs_ep->lock, flags);
+
+	if (!on_list(hs_ep, hs_req)) {
+		spin_unlock_irqrestore(&hs_ep->lock, flags);
+		return -EINVAL;
+	}
+
+	s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
+	spin_unlock_irqrestore(&hs_ep->lock, flags);
+
+	return 0;
+}
+
+static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
+{
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hs = hs_ep->parent;
+	int index = hs_ep->index;
+	unsigned long irqflags;
+	u32 epreg;
+	u32 epctl;
+	u32 xfertype;
+
+	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
+
+	spin_lock_irqsave(&hs_ep->lock, irqflags);
+
+	/* write both IN and OUT control registers */
+
+	epreg = S3C_DIEPCTL(index);
+	epctl = readl(hs->regs + epreg);
+
+	if (value) {
+		epctl |= S3C_DxEPCTL_Stall | S3C_DxEPCTL_SNAK;
+		if (epctl & S3C_DxEPCTL_EPEna)
+			epctl |= S3C_DxEPCTL_EPDis;
+	} else {
+		epctl &= ~S3C_DxEPCTL_Stall;
+		xfertype = epctl & S3C_DxEPCTL_EPType_MASK;
+		if (xfertype == S3C_DxEPCTL_EPType_Bulk ||
+			xfertype == S3C_DxEPCTL_EPType_Intterupt)
+				epctl |= S3C_DxEPCTL_SetD0PID;
+	}
+
+	writel(epctl, hs->regs + epreg);
+
+	epreg = S3C_DOEPCTL(index);
+	epctl = readl(hs->regs + epreg);
+
+	if (value)
+		epctl |= S3C_DxEPCTL_Stall;
+	else {
+		epctl &= ~S3C_DxEPCTL_Stall;
+		xfertype = epctl & S3C_DxEPCTL_EPType_MASK;
+		if (xfertype == S3C_DxEPCTL_EPType_Bulk ||
+			xfertype == S3C_DxEPCTL_EPType_Intterupt)
+				epctl |= S3C_DxEPCTL_SetD0PID;
+	}
+
+	writel(epctl, hs->regs + epreg);
+
+	spin_unlock_irqrestore(&hs_ep->lock, irqflags);
+
+	return 0;
+}
+
+static struct usb_ep_ops s3c_hsotg_ep_ops = {
+	.enable		= s3c_hsotg_ep_enable,
+	.disable	= s3c_hsotg_ep_disable,
+	.alloc_request	= s3c_hsotg_ep_alloc_request,
+	.free_request	= s3c_hsotg_ep_free_request,
+	.queue		= s3c_hsotg_ep_queue,
+	.dequeue	= s3c_hsotg_ep_dequeue,
+	.set_halt	= s3c_hsotg_ep_sethalt,
+	/* note, don't believe we have any call for the fifo routines */
+};
+
+/**
+ * s3c_hsotg_corereset - issue softreset to the core
+ * @hsotg: The device state
+ *
+ * Issue a soft reset to the core, and await the core finishing it.
+*/
+static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
+{
+	int timeout;
+	u32 grstctl;
+
+	dev_dbg(hsotg->dev, "resetting core\n");
+
+	/* issue soft reset */
+	writel(S3C_GRSTCTL_CSftRst, hsotg->regs + S3C_GRSTCTL);
+
+	timeout = 1000;
+	do {
+		grstctl = readl(hsotg->regs + S3C_GRSTCTL);
+	} while ((grstctl & S3C_GRSTCTL_CSftRst) && timeout-- > 0);
+
+	if (grstctl & S3C_GRSTCTL_CSftRst) {
+		dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
+		return -EINVAL;
+	}
+
+	timeout = 1000;
+
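+	/* the soft reset request has now cleared; wait for the AHB
+	 * master interface to report idle before we touch the core
+	 * again. */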
+	while (1) {
+		u32 grstctl = readl(hsotg->regs + S3C_GRSTCTL);
+
+		if (timeout-- < 0) {
+			dev_info(hsotg->dev,
+				 "%s: reset failed, GRSTCTL=%08x\n",
+				 __func__, grstctl);
+			return -ETIMEDOUT;
+		}
+
+		if (!(grstctl & S3C_GRSTCTL_AHBIdle))
+			continue;
+
+		break;		/* reset done */
+	}
+
+	dev_dbg(hsotg->dev, "reset successful\n");
+	return 0;
+}
+
+static int s3c_hsotg_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct s3c_hsotg *hsotg = our_hsotg;
+	int ret;
+
+	if (!hsotg) {
+		printk(KERN_ERR "%s: called with no device\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!driver) {
+		dev_err(hsotg->dev, "%s: no driver\n", __func__);
+		return -EINVAL;
+	}
+
+	if (driver->max_speed < USB_SPEED_FULL)
+		dev_err(hsotg->dev, "%s: bad speed\n", __func__);
+
+	if (!bind || !driver->setup) {
+		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
+		return -EINVAL;
+	}
+
+	WARN_ON(hsotg->driver);
+
+	driver->driver.bus = NULL;
+	hsotg->driver = driver;
+	hsotg->gadget.dev.driver = &driver->driver;
+	hsotg->gadget.dev.dma_mask = hsotg->dev->dma_mask;
+	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+
+	ret = device_add(&hsotg->gadget.dev);
+	if (ret) {
+		dev_err(hsotg->dev, "failed to register gadget device\n");
+		goto err;
+	}
+
+	ret = bind(&hsotg->gadget);
+	if (ret) {
+		dev_err(hsotg->dev, "failed bind %s\n", driver->driver.name);
+
+		hsotg->gadget.dev.driver = NULL;
+		hsotg->driver = NULL;
+		goto err;
+	}
+
+	/* we must now enable ep0 ready for host detection and then
+	 * set configuration. */
+
+	s3c_hsotg_corereset(hsotg);
+
+	/* set the PLL on, remove the HNP/SRP and set the PHY */
+	writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) |
+	       (0x5 << 10), hsotg->regs + S3C_GUSBCFG);
+
+	/* looks like soft-reset changes state of FIFOs */
+	s3c_hsotg_init_fifo(hsotg);
+
+	__orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
+
+	writel(1 << 18 | S3C_DCFG_DevSpd_HS,  hsotg->regs + S3C_DCFG);
+
+	/* Clear any pending OTG interrupts */
+	writel(0xffffffff, hsotg->regs + S3C_GOTGINT);
+
+	/* Clear any pending interrupts */
+	writel(0xffffffff, hsotg->regs + S3C_GINTSTS);
+
+	writel(S3C_GINTSTS_DisconnInt | S3C_GINTSTS_SessReqInt |
+	       S3C_GINTSTS_ConIDStsChng | S3C_GINTSTS_USBRst |
+	       S3C_GINTSTS_EnumDone | S3C_GINTSTS_OTGInt |
+	       S3C_GINTSTS_USBSusp | S3C_GINTSTS_WkUpInt |
+	       S3C_GINTSTS_GOUTNakEff | S3C_GINTSTS_GINNakEff |
+	       S3C_GINTSTS_ErlySusp,
+	       hsotg->regs + S3C_GINTMSK);
+
+	if (using_dma(hsotg))
+		writel(S3C_GAHBCFG_GlblIntrEn | S3C_GAHBCFG_DMAEn |
+		       S3C_GAHBCFG_HBstLen_Incr4,
+		       hsotg->regs + S3C_GAHBCFG);
+	else
+		writel(S3C_GAHBCFG_GlblIntrEn, hsotg->regs + S3C_GAHBCFG);
+
+	/* Enabling INTknTXFEmpMsk here seems to be a big mistake: we end
+	 * up being flooded with interrupts if the host is polling the
+	 * endpoint to try and read data. */
+
+	writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
+	       S3C_DIEPMSK_INTknEPMisMsk |
+	       S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk |
+	       ((hsotg->dedicated_fifos) ? S3C_DIEPMSK_TxFIFOEmpty : 0),
+	       hsotg->regs + S3C_DIEPMSK);
+
+	/* don't need XferCompl, we get that from RXFIFO in slave mode. In
+	 * DMA mode we may need this. */
+	writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
+	       S3C_DOEPMSK_EPDisbldMsk |
+	       (using_dma(hsotg) ? (S3C_DIEPMSK_XferComplMsk |
+				   S3C_DIEPMSK_TimeOUTMsk) : 0),
+	       hsotg->regs + S3C_DOEPMSK);
+
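+	/* leave every per-endpoint interrupt masked for now; EP0's are
+	 * enabled again below via s3c_hsotg_ctrl_epint() */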
+	writel(0, hsotg->regs + S3C_DAINTMSK);
+
+	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+		readl(hsotg->regs + S3C_DIEPCTL0),
+		readl(hsotg->regs + S3C_DOEPCTL0));
+
+	/* enable in and out endpoint interrupts */
+	s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt);
+
+	/* Enable the RXFIFO when in slave mode, as this is how we collect
+	 * the data. In DMA mode, we get events from the FIFO but also
+	 * things we cannot process, so do not use it. */
+	if (!using_dma(hsotg))
+		s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_RxFLvl);
+
+	/* Enable interrupts for EP0 in and out */
+	s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
+	s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);
+
+	__orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
+	udelay(10);  /* see openiboot */
+	__bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
+
+	dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL));
+
+	/* S3C_DxEPCTL_USBActEp is documented as read-only, but it seems
+	 * to be set by writing to the EPCTL register. */
+
+	/* set to read 1 8byte packet */
+	writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
+	       S3C_DxEPTSIZ_XferSize(8), hsotg->regs + DOEPTSIZ0);
+
+	writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
+	       S3C_DxEPCTL_CNAK | S3C_DxEPCTL_EPEna |
+	       S3C_DxEPCTL_USBActEp,
+	       hsotg->regs + S3C_DOEPCTL0);
+
+	/* enable, but don't activate EP0in */
+	writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
+	       S3C_DxEPCTL_USBActEp, hsotg->regs + S3C_DIEPCTL0);
+
+	s3c_hsotg_enqueue_setup(hsotg);
+
+	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+		readl(hsotg->regs + S3C_DIEPCTL0),
+		readl(hsotg->regs + S3C_DOEPCTL0));
+
+	/* clear global NAKs */
+	writel(S3C_DCTL_CGOUTNak | S3C_DCTL_CGNPInNAK,
+	       hsotg->regs + S3C_DCTL);
+
+	/* must be at least 3ms to allow the bus to see the disconnect */
+	msleep(3);
+
+	/* remove the soft-disconnect and let's go */
+	__bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
+
+	/* report to the user, and return */
+
+	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
+	return 0;
+
+err:
+	hsotg->driver = NULL;
+	hsotg->gadget.dev.driver = NULL;
+	return ret;
+}
+
+static int s3c_hsotg_stop(struct usb_gadget_driver *driver)
+{
+	struct s3c_hsotg *hsotg = our_hsotg;
+	int ep;
+
+	if (!hsotg)
+		return -ENODEV;
+
+	if (!driver || driver != hsotg->driver || !driver->unbind)
+		return -EINVAL;
+
+	/* all endpoints should be shutdown */
+	for (ep = 0; ep < S3C_HSOTG_EPS; ep++)
+		s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
+
+	call_gadget(hsotg, disconnect);
+
+	driver->unbind(&hsotg->gadget);
+	hsotg->driver = NULL;
+	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+
+	device_del(&hsotg->gadget.dev);
+
+	dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",
+		 driver->driver.name);
+
+	return 0;
+}
+
+static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
+{
+	return s3c_hsotg_read_frameno(to_hsotg(gadget));
+}
+
+static struct usb_gadget_ops s3c_hsotg_gadget_ops = {
+	.get_frame	= s3c_hsotg_gadget_getframe,
+	.start		= s3c_hsotg_start,
+	.stop		= s3c_hsotg_stop,
+};
+
+/**
+ * s3c_hsotg_initep - initialise a single endpoint
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint to be initialised.
+ * @epnum: The endpoint number
+ *
+ * Initialise the given endpoint (as part of the probe and device state
+ * creation) to give to the gadget driver. Setup the endpoint name, any
+ * direction information and other state that may be required.
+ */
+static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg,
+				       struct s3c_hsotg_ep *hs_ep,
+				       int epnum)
+{
+	u32 ptxfifo;
+	char *dir;
+
+	if (epnum == 0)
+		dir = "";
+	else if ((epnum % 2) == 0) {
+		dir = "out";
+	} else {
+		dir = "in";
+		hs_ep->dir_in = 1;
+	}
+
+	hs_ep->index = epnum;
+
+	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
+
+	INIT_LIST_HEAD(&hs_ep->queue);
+	INIT_LIST_HEAD(&hs_ep->ep.ep_list);
+
+	spin_lock_init(&hs_ep->lock);
+
+	/* add to the list of endpoints known by the gadget driver */
+	if (epnum)
+		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
+
+	hs_ep->parent = hsotg;
+	hs_ep->ep.name = hs_ep->name;
+	hs_ep->ep.maxpacket = epnum ? 512 : EP0_MPS_LIMIT;
+	hs_ep->ep.ops = &s3c_hsotg_ep_ops;
+
+	/* Read the FIFO size for the Periodic TX FIFO, even if we're
+	 * an OUT endpoint; we may as well do this in case the code is
+	 * later changed to make each endpoint's direction configurable.
+	 */
+
+	ptxfifo = readl(hsotg->regs + S3C_DPTXFSIZn(epnum));
+	hs_ep->fifo_size = S3C_DPTXFSIZn_DPTxFSize_GET(ptxfifo) * 4;
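+	/* the DPTxFSize field is (presumably) in 32-bit words, hence the
+	 * multiply by four to keep fifo_size in bytes */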
+
+	/* if we're using dma, we need to set the next-endpoint pointer
+	 * to be something valid.
+	 */
+
+	if (using_dma(hsotg)) {
+		u32 next = S3C_DxEPCTL_NextEp((epnum + 1) % 15);
+		writel(next, hsotg->regs + S3C_DIEPCTL(epnum));
+		writel(next, hsotg->regs + S3C_DOEPCTL(epnum));
+	}
+}
+
+/**
+ * s3c_hsotg_otgreset - reset the OtG phy block
+ * @hsotg: The host state.
+ *
+ * Power up the phy, set the basic configuration and start the PHY.
+ */
+static void s3c_hsotg_otgreset(struct s3c_hsotg *hsotg)
+{
+	struct clk *xusbxti;
+	u32 pwr, osc;
+
+	pwr = readl(S3C_PHYPWR);
+	pwr &= ~0x19;
+	writel(pwr, S3C_PHYPWR);
+	mdelay(1);
+
+	osc = hsotg->plat->is_osc ? S3C_PHYCLK_EXT_OSC : 0;
+
+	xusbxti = clk_get(hsotg->dev, "xusbxti");
+	if (xusbxti && !IS_ERR(xusbxti)) {
+		switch (clk_get_rate(xusbxti)) {
+		case 12*MHZ:
+			osc |= S3C_PHYCLK_CLKSEL_12M;
+			break;
+		case 24*MHZ:
+			osc |= S3C_PHYCLK_CLKSEL_24M;
+			break;
+		default:
+		case 48*MHZ:
+			/* default reference clock */
+			break;
+		}
+		clk_put(xusbxti);
+	}
+
+	writel(osc | 0x10, S3C_PHYCLK);
+
+	/* issue a full set of resets to the otg and core */
+
+	writel(S3C_RSTCON_PHY, S3C_RSTCON);
+	udelay(20);	/* at least 10us */
+	writel(0, S3C_RSTCON);
+}
+
+
+static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
+{
+	u32 cfg4;
+
+	/* unmask subset of endpoint interrupts */
+
+	writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
+	       S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk,
+	       hsotg->regs + S3C_DIEPMSK);
+
+	writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
+	       S3C_DOEPMSK_EPDisbldMsk | S3C_DOEPMSK_XferComplMsk,
+	       hsotg->regs + S3C_DOEPMSK);
+
+	writel(0, hsotg->regs + S3C_DAINTMSK);
+
+	/* Be in disconnected state until gadget is registered */
+	__orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
+
+	if (0) {
+		/* post global nak until we're ready */
+		writel(S3C_DCTL_SGNPInNAK | S3C_DCTL_SGOUTNak,
+		       hsotg->regs + S3C_DCTL);
+	}
+
+	/* setup fifos */
+
+	dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
+		readl(hsotg->regs + S3C_GRXFSIZ),
+		readl(hsotg->regs + S3C_GNPTXFSIZ));
+
+	s3c_hsotg_init_fifo(hsotg);
+
+	/* set the PLL on, remove the HNP/SRP and set the PHY */
+	writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) | (0x5 << 10),
+	       hsotg->regs + S3C_GUSBCFG);
+
+	writel(using_dma(hsotg) ? S3C_GAHBCFG_DMAEn : 0x0,
+	       hsotg->regs + S3C_GAHBCFG);
+
+	/* check hardware configuration */
+
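+	/* offset 0x50 should be the core's GHWCFG4 register; bit 25
+	 * reports whether the core was built with dedicated TX FIFOs */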
+	cfg4 = readl(hsotg->regs + 0x50);
+	hsotg->dedicated_fifos = (cfg4 >> 25) & 1;
+
+	dev_info(hsotg->dev, "%s fifos\n",
+		 hsotg->dedicated_fifos ? "dedicated" : "shared");
+}
+
+static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
+{
+#ifdef DEBUG
+	struct device *dev = hsotg->dev;
+	void __iomem *regs = hsotg->regs;
+	u32 val;
+	int idx;
+
+	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
+		 readl(regs + S3C_DCFG), readl(regs + S3C_DCTL),
+		 readl(regs + S3C_DIEPMSK));
+
+	dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
+		 readl(regs + S3C_GAHBCFG), readl(regs + 0x44));
+
+	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
+		 readl(regs + S3C_GRXFSIZ), readl(regs + S3C_GNPTXFSIZ));
+
+	/* show periodic fifo settings */
+
+	for (idx = 1; idx <= 15; idx++) {
+		val = readl(regs + S3C_DPTXFSIZn(idx));
+		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
+			 val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
+			 val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
+	}
+
+	for (idx = 0; idx < 15; idx++) {
+		dev_info(dev,
+			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
+			 readl(regs + S3C_DIEPCTL(idx)),
+			 readl(regs + S3C_DIEPTSIZ(idx)),
+			 readl(regs + S3C_DIEPDMA(idx)));
+
+		val = readl(regs + S3C_DOEPCTL(idx));
+		dev_info(dev,
+			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
+			 idx, readl(regs + S3C_DOEPCTL(idx)),
+			 readl(regs + S3C_DOEPTSIZ(idx)),
+			 readl(regs + S3C_DOEPDMA(idx)));
+
+	}
+
+	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
+		 readl(regs + S3C_DVBUSDIS), readl(regs + S3C_DVBUSPULSE));
+#endif
+}
+
+
+/**
+ * state_show - debugfs: show overall driver and device state.
+ * @seq: The seq file to write to.
+ * @v: Unused parameter.
+ *
+ * This debugfs entry shows the overall state of the hardware and
+ * some general information about each of the endpoints available
+ * to the system.
+ */
+static int state_show(struct seq_file *seq, void *v)
+{
+	struct s3c_hsotg *hsotg = seq->private;
+	void __iomem *regs = hsotg->regs;
+	int idx;
+
+	seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
+		 readl(regs + S3C_DCFG),
+		 readl(regs + S3C_DCTL),
+		 readl(regs + S3C_DSTS));
+
+	seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
+		   readl(regs + S3C_DIEPMSK), readl(regs + S3C_DOEPMSK));
+
+	seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
+		   readl(regs + S3C_GINTMSK),
+		   readl(regs + S3C_GINTSTS));
+
+	seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
+		   readl(regs + S3C_DAINTMSK),
+		   readl(regs + S3C_DAINT));
+
+	seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
+		   readl(regs + S3C_GNPTXSTS),
+		   readl(regs + S3C_GRXSTSR));
+
+	seq_printf(seq, "\nEndpoint status:\n");
+
+	for (idx = 0; idx < 15; idx++) {
+		u32 in, out;
+
+		in = readl(regs + S3C_DIEPCTL(idx));
+		out = readl(regs + S3C_DOEPCTL(idx));
+
+		seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
+			   idx, in, out);
+
+		in = readl(regs + S3C_DIEPTSIZ(idx));
+		out = readl(regs + S3C_DOEPTSIZ(idx));
+
+		seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
+			   in, out);
+
+		seq_printf(seq, "\n");
+	}
+
+	return 0;
+}
+
+static int state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, state_show, inode->i_private);
+}
+
+static const struct file_operations state_fops = {
+	.owner		= THIS_MODULE,
+	.open		= state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/**
+ * fifo_show - debugfs: show the fifo information
+ * @seq: The seq_file to write data to.
+ * @v: Unused parameter.
+ *
+ * Show the FIFO information for the overall fifo and all the
+ * periodic transmission FIFOs.
+*/
+static int fifo_show(struct seq_file *seq, void *v)
+{
+	struct s3c_hsotg *hsotg = seq->private;
+	void __iomem *regs = hsotg->regs;
+	u32 val;
+	int idx;
+
+	seq_printf(seq, "Non-periodic FIFOs:\n");
+	seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + S3C_GRXFSIZ));
+
+	val = readl(regs + S3C_GNPTXFSIZ);
+	seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
+		   val >> S3C_GNPTXFSIZ_NPTxFDep_SHIFT,
+		   val & S3C_GNPTXFSIZ_NPTxFStAddr_MASK);
+
+	seq_printf(seq, "\nPeriodic TXFIFOs:\n");
+
+	for (idx = 1; idx <= 15; idx++) {
+		val = readl(regs + S3C_DPTXFSIZn(idx));
+
+		seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
+			   val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
+			   val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
+	}
+
+	return 0;
+}
+
+static int fifo_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, fifo_show, inode->i_private);
+}
+
+static const struct file_operations fifo_fops = {
+	.owner		= THIS_MODULE,
+	.open		= fifo_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+
+static const char *decode_direction(int is_in)
+{
+	return is_in ? "in" : "out";
+}
+
+/**
+ * ep_show - debugfs: show the state of an endpoint.
+ * @seq: The seq_file to write data to.
+ * @v: Unused parameter.
+ *
+ * This debugfs entry shows the state of the given endpoint (one is
+ * registered for each available).
+*/
+static int ep_show(struct seq_file *seq, void *v)
+{
+	struct s3c_hsotg_ep *ep = seq->private;
+	struct s3c_hsotg *hsotg = ep->parent;
+	struct s3c_hsotg_req *req;
+	void __iomem *regs = hsotg->regs;
+	int index = ep->index;
+	int show_limit = 15;
+	unsigned long flags;
+
+	seq_printf(seq, "Endpoint index %d, named %s,  dir %s:\n",
+		   ep->index, ep->ep.name, decode_direction(ep->dir_in));
+
+	/* first show the register state */
+
+	seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
+		   readl(regs + S3C_DIEPCTL(index)),
+		   readl(regs + S3C_DOEPCTL(index)));
+
+	seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
+		   readl(regs + S3C_DIEPDMA(index)),
+		   readl(regs + S3C_DOEPDMA(index)));
+
+	seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
+		   readl(regs + S3C_DIEPINT(index)),
+		   readl(regs + S3C_DOEPINT(index)));
+
+	seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
+		   readl(regs + S3C_DIEPTSIZ(index)),
+		   readl(regs + S3C_DOEPTSIZ(index)));
+
+	seq_printf(seq, "\n");
+	seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
+	seq_printf(seq, "total_data=%ld\n", ep->total_data);
+
+	seq_printf(seq, "request list (%p,%p):\n",
+		   ep->queue.next, ep->queue.prev);
+
+	spin_lock_irqsave(&ep->lock, flags);
+
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (--show_limit < 0) {
+			seq_printf(seq, "not showing more requests...\n");
+			break;
+		}
+
+		seq_printf(seq, "%c req %p: %d bytes @%p, ",
+			   req == ep->req ? '*' : ' ',
+			   req, req->req.length, req->req.buf);
+		seq_printf(seq, "%d done, res %d\n",
+			   req->req.actual, req->req.status);
+	}
+
+	spin_unlock_irqrestore(&ep->lock, flags);
+
+	return 0;
+}
+
+static int ep_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ep_show, inode->i_private);
+}
+
+static const struct file_operations ep_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ep_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/**
+ * s3c_hsotg_create_debug - create debugfs directory and files
+ * @hsotg: The driver state
+ *
+ * Create the debugfs files to allow the user to get information
+ * about the state of the system. The directory name is created
+ * with the same name as the device itself, in case we end up
+ * with multiple blocks in future systems.
+*/
+static void __devinit s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
+{
+	struct dentry *root;
+	unsigned epidx;
+
+	root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
+	hsotg->debug_root = root;
+	if (IS_ERR(root)) {
+		dev_err(hsotg->dev, "cannot create debug root\n");
+		return;
+	}
+
+	/* create general state file */
+
+	hsotg->debug_file = debugfs_create_file("state", 0444, root,
+						hsotg, &state_fops);
+
+	if (IS_ERR(hsotg->debug_file))
+		dev_err(hsotg->dev, "%s: failed to create state\n", __func__);
+
+	hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
+						hsotg, &fifo_fops);
+
+	if (IS_ERR(hsotg->debug_fifo))
+		dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);
+
+	/* create one file for each endpoint */
+
+	for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {
+		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
+
+		ep->debugfs = debugfs_create_file(ep->name, 0444,
+						  root, ep, &ep_fops);
+
+		if (IS_ERR(ep->debugfs))
+			dev_err(hsotg->dev, "failed to create %s debug file\n",
+				ep->name);
+	}
+}
+
+/**
+ * s3c_hsotg_delete_debug - cleanup debugfs entries
+ * @hsotg: The driver state
+ *
+ * Cleanup (remove) the debugfs files for use on module exit.
+*/
+static void __devexit s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
+{
+	unsigned epidx;
+
+	for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {
+		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
+		debugfs_remove(ep->debugfs);
+	}
+
+	debugfs_remove(hsotg->debug_file);
+	debugfs_remove(hsotg->debug_fifo);
+	debugfs_remove(hsotg->debug_root);
+}
+
+/**
+ * s3c_hsotg_gate - set the hardware gate for the block
+ * @pdev: The device we bound to
+ * @on: On or off.
+ *
+ * Set the hardware gate setting into the block. If we end up on
+ * something other than an S3C64XX, then we might need to change this
+ * to using a platform data callback, or some other mechanism.
+ */
+static void s3c_hsotg_gate(struct platform_device *pdev, bool on)
+{
+	unsigned long flags;
+	u32 others;
+
+	local_irq_save(flags);
+
+	others = __raw_readl(S3C64XX_OTHERS);
+	if (on)
+		others |= S3C64XX_OTHERS_USBMASK;
+	else
+		others &= ~S3C64XX_OTHERS_USBMASK;
+	__raw_writel(others, S3C64XX_OTHERS);
+
+	local_irq_restore(flags);
+}
+
+static struct s3c_hsotg_plat s3c_hsotg_default_pdata;
+
+static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
+{
+	struct s3c_hsotg_plat *plat = pdev->dev.platform_data;
+	struct device *dev = &pdev->dev;
+	struct s3c_hsotg *hsotg;
+	struct resource *res;
+	int epnum;
+	int ret;
+
+	if (!plat)
+		plat = &s3c_hsotg_default_pdata;
+
+	hsotg = kzalloc(sizeof(struct s3c_hsotg) +
+			sizeof(struct s3c_hsotg_ep) * S3C_HSOTG_EPS,
+			GFP_KERNEL);
+	if (!hsotg) {
+		dev_err(dev, "cannot get memory\n");
+		return -ENOMEM;
+	}
+
+	hsotg->dev = dev;
+	hsotg->plat = plat;
+
+	hsotg->clk = clk_get(&pdev->dev, "otg");
+	if (IS_ERR(hsotg->clk)) {
+		dev_err(dev, "cannot get otg clock\n");
+		ret = PTR_ERR(hsotg->clk);
+		goto err_mem;
+	}
+
+	platform_set_drvdata(pdev, hsotg);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "cannot find register resource 0\n");
+		ret = -EINVAL;
+		goto err_clk;
+	}
+
+	hsotg->regs_res = request_mem_region(res->start, resource_size(res),
+					     dev_name(dev));
+	if (!hsotg->regs_res) {
+		dev_err(dev, "cannot reserve registers\n");
+		ret = -ENOENT;
+		goto err_clk;
+	}
+
+	hsotg->regs = ioremap(res->start, resource_size(res));
+	if (!hsotg->regs) {
+		dev_err(dev, "cannot map registers\n");
+		ret = -ENXIO;
+		goto err_regs_res;
+	}
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_err(dev, "cannot find IRQ\n");
+		goto err_regs;
+	}
+
+	hsotg->irq = ret;
+
+	ret = request_irq(ret, s3c_hsotg_irq, 0, dev_name(dev), hsotg);
+	if (ret < 0) {
+		dev_err(dev, "cannot claim IRQ\n");
+		goto err_regs;
+	}
+
+	dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
+
+	device_initialize(&hsotg->gadget.dev);
+
+	dev_set_name(&hsotg->gadget.dev, "gadget");
+
+	hsotg->gadget.max_speed = USB_SPEED_HIGH;
+	hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
+	hsotg->gadget.name = dev_name(dev);
+
+	hsotg->gadget.dev.parent = dev;
+	hsotg->gadget.dev.dma_mask = dev->dma_mask;
+
+	/* setup endpoint information */
+
+	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
+	hsotg->gadget.ep0 = &hsotg->eps[0].ep;
+
+	/* allocate EP0 request */
+
+	hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,
+						     GFP_KERNEL);
+	if (!hsotg->ctrl_req) {
+		dev_err(dev, "failed to allocate ctrl req\n");
+		ret = -ENOMEM;
+		goto err_regs;
+	}
+
+	/* reset the system */
+
+	clk_enable(hsotg->clk);
+
+	s3c_hsotg_gate(pdev, true);
+
+	s3c_hsotg_otgreset(hsotg);
+	s3c_hsotg_corereset(hsotg);
+	s3c_hsotg_init(hsotg);
+
+	/* initialise the endpoints now the core has been initialised */
+	for (epnum = 0; epnum < S3C_HSOTG_EPS; epnum++)
+		s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);
+
+	ret = usb_add_gadget_udc(&pdev->dev, &hsotg->gadget);
+	if (ret)
+		goto err_add_udc;
+
+	s3c_hsotg_create_debug(hsotg);
+
+	s3c_hsotg_dump(hsotg);
+
+	our_hsotg = hsotg;
+	return 0;
+
+err_add_udc:
+	s3c_hsotg_gate(pdev, false);
+	clk_disable(hsotg->clk);
+
+err_regs:
+	iounmap(hsotg->regs);
+
+err_regs_res:
+	release_resource(hsotg->regs_res);
+	kfree(hsotg->regs_res);
+err_clk:
+	clk_put(hsotg->clk);
+err_mem:
+	kfree(hsotg);
+	return ret;
+}
+
+static int __devexit s3c_hsotg_remove(struct platform_device *pdev)
+{
+	struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
+
+	usb_del_gadget_udc(&hsotg->gadget);
+
+	s3c_hsotg_delete_debug(hsotg);
+
+	usb_gadget_unregister_driver(hsotg->driver);
+
+	free_irq(hsotg->irq, hsotg);
+	iounmap(hsotg->regs);
+
+	release_resource(hsotg->regs_res);
+	kfree(hsotg->regs_res);
+
+	s3c_hsotg_gate(pdev, false);
+
+	clk_disable(hsotg->clk);
+	clk_put(hsotg->clk);
+
+	kfree(hsotg);
+	return 0;
+}
+
+/* power management is not implemented yet */
+#define s3c_hsotg_suspend NULL
+#define s3c_hsotg_resume NULL
+
+static struct platform_driver s3c_hsotg_driver = {
+	.driver		= {
+		.name	= "s3c-hsotg",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= s3c_hsotg_probe,
+	.remove		= __devexit_p(s3c_hsotg_remove),
+	.suspend	= s3c_hsotg_suspend,
+	.resume		= s3c_hsotg_resume,
+};
+
+module_platform_driver(s3c_hsotg_driver);
+
+MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s3c-hsotg");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/s3c-hsudc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/s3c-hsudc.c
new file mode 100644
index 0000000..cef9b82
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/s3c-hsudc.c
@@ -0,0 +1,1413 @@
+/* linux/drivers/usb/gadget/s3c-hsudc.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * S3C24XX USB 2.0 High-speed USB controller gadget driver
+ *
+ * The S3C24XX USB 2.0 high-speed USB controller supports up to 9 endpoints.
+ * Each endpoint can be configured as either in or out endpoint. Endpoints
+ * can be configured for Bulk or Interrupt transfer mode.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/prefetch.h>
+#include <linux/platform_data/s3c-hsudc.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+
+#include <mach/regs-s3c2443-clock.h>
+
+#define S3C_HSUDC_REG(x)	(x)
+
+/* Non-Indexed Registers */
+#define S3C_IR				S3C_HSUDC_REG(0x00) /* Index Register */
+#define S3C_EIR				S3C_HSUDC_REG(0x04) /* EP Intr Status */
+#define S3C_EIR_EP0			(1<<0)
+#define S3C_EIER			S3C_HSUDC_REG(0x08) /* EP Intr Enable */
+#define S3C_FAR				S3C_HSUDC_REG(0x0c) /* Gadget Address */
+#define S3C_FNR				S3C_HSUDC_REG(0x10) /* Frame Number */
+#define S3C_EDR				S3C_HSUDC_REG(0x14) /* EP Direction */
+#define S3C_TR				S3C_HSUDC_REG(0x18) /* Test Register */
+#define S3C_SSR				S3C_HSUDC_REG(0x1c) /* System Status */
+#define S3C_SSR_DTZIEN_EN		(0xff8f)
+#define S3C_SSR_ERR			(0xff80)
+#define S3C_SSR_VBUSON			(1 << 8)
+#define S3C_SSR_HSP			(1 << 4)
+#define S3C_SSR_SDE			(1 << 3)
+#define S3C_SSR_RESUME			(1 << 2)
+#define S3C_SSR_SUSPEND			(1 << 1)
+#define S3C_SSR_RESET			(1 << 0)
+#define S3C_SCR				S3C_HSUDC_REG(0x20) /* System Control */
+#define S3C_SCR_DTZIEN_EN		(1 << 14)
+#define S3C_SCR_RRD_EN			(1 << 5)
+#define S3C_SCR_SUS_EN			(1 << 1)
+#define S3C_SCR_RST_EN			(1 << 0)
+#define S3C_EP0SR			S3C_HSUDC_REG(0x24) /* EP0 Status */
+#define S3C_EP0SR_EP0_LWO		(1 << 6)
+#define S3C_EP0SR_STALL			(1 << 4)
+#define S3C_EP0SR_TX_SUCCESS		(1 << 1)
+#define S3C_EP0SR_RX_SUCCESS		(1 << 0)
+#define S3C_EP0CR			S3C_HSUDC_REG(0x28) /* EP0 Control */
+#define S3C_BR(_x)			S3C_HSUDC_REG(0x60 + ((_x) * 4))
+
+/* Indexed Registers */
+#define S3C_ESR				S3C_HSUDC_REG(0x2c) /* EPn Status */
+#define S3C_ESR_FLUSH			(1 << 6)
+#define S3C_ESR_STALL			(1 << 5)
+#define S3C_ESR_LWO			(1 << 4)
+#define S3C_ESR_PSIF_ONE		(1 << 2)
+#define S3C_ESR_PSIF_TWO		(2 << 2)
+#define S3C_ESR_TX_SUCCESS		(1 << 1)
+#define S3C_ESR_RX_SUCCESS		(1 << 0)
+#define S3C_ECR				S3C_HSUDC_REG(0x30) /* EPn Control */
+#define S3C_ECR_DUEN			(1 << 7)
+#define S3C_ECR_FLUSH			(1 << 6)
+#define S3C_ECR_STALL			(1 << 1)
+#define S3C_ECR_IEMS			(1 << 0)
+#define S3C_BRCR			S3C_HSUDC_REG(0x34) /* Read Count */
+#define S3C_BWCR			S3C_HSUDC_REG(0x38) /* Write Count */
+#define S3C_MPR				S3C_HSUDC_REG(0x3c) /* Max Pkt Size */
+
+#define WAIT_FOR_SETUP			(0)
+#define DATA_STATE_XMIT			(1)
+#define DATA_STATE_RECV			(2)
+
+static const char * const s3c_hsudc_supply_names[] = {
+	"vdda",		/* analog phy supply, 3.3V */
+	"vddi",		/* digital phy supply, 1.2V */
+	"vddosc",	/* oscillator supply, 1.8V - 3.3V */
+};
+
+/**
+ * struct s3c_hsudc_ep - Endpoint representation used by driver.
+ * @ep: USB gadget layer representation of device endpoint.
+ * @name: Endpoint name (as required by ep autoconfiguration).
+ * @dev: Reference to the device controller to which this EP belongs.
+ * @desc: Endpoint descriptor obtained from the gadget driver.
+ * @queue: Transfer request queue for the endpoint.
+ * @stopped: Maintains state of endpoint, set if EP is halted.
+ * @bEndpointAddress: EP address (including direction bit).
+ * @fifo: Base address of EP FIFO.
+ */
+struct s3c_hsudc_ep {
+	struct usb_ep ep;
+	char name[20];
+	struct s3c_hsudc *dev;
+	const struct usb_endpoint_descriptor *desc;
+	struct list_head queue;
+	u8 stopped;
+	u8 wedge;
+	u8 bEndpointAddress;
+	void __iomem *fifo;
+};
+
+/**
+ * struct s3c_hsudc_req - Driver encapsulation of USB gadget transfer request.
+ * @req: Reference to USB gadget transfer request.
+ * @queue: Used for inserting this request to the endpoint request queue.
+ */
+struct s3c_hsudc_req {
+	struct usb_request req;
+	struct list_head queue;
+};
+
+/**
+ * struct s3c_hsudc - Driver's abstraction of the device controller.
+ * @gadget: Instance of usb_gadget which is referenced by gadget driver.
+ * @driver: Reference to currently active gadget driver.
+ * @dev: The device reference used by probe function.
+ * @lock: Lock to synchronize the usage of Endpoints (EP's are indexed).
+ * @regs: Remapped base address of controller's register space.
+ * @mem_rsrc: Device memory resource used for remapping device register space.
+ * @irq: IRQ number used by the controller.
+ * @uclk: Reference to the controller clock.
+ * @ep0state: Current state of EP0.
+ * @ep: List of endpoints supported by the controller.
+ */
+struct s3c_hsudc {
+	struct usb_gadget gadget;
+	struct usb_gadget_driver *driver;
+	struct device *dev;
+	struct s3c24xx_hsudc_platdata *pd;
+	struct usb_phy *transceiver;
+	struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsudc_supply_names)];
+	spinlock_t lock;
+	void __iomem *regs;
+	struct resource *mem_rsrc;
+	int irq;
+	struct clk *uclk;
+	int ep0state;
+	struct s3c_hsudc_ep ep[];
+};
+
+#define ep_maxpacket(_ep)	((_ep)->ep.maxpacket)
+#define ep_is_in(_ep)		((_ep)->bEndpointAddress & USB_DIR_IN)
+#define ep_index(_ep)		((_ep)->bEndpointAddress & \
+					USB_ENDPOINT_NUMBER_MASK)
+
+static const char driver_name[] = "s3c-udc";
+static const char ep0name[] = "ep0-control";
+
+static inline struct s3c_hsudc_req *our_req(struct usb_request *req)
+{
+	return container_of(req, struct s3c_hsudc_req, req);
+}
+
+static inline struct s3c_hsudc_ep *our_ep(struct usb_ep *ep)
+{
+	return container_of(ep, struct s3c_hsudc_ep, ep);
+}
+
+static inline struct s3c_hsudc *to_hsudc(struct usb_gadget *gadget)
+{
+	return container_of(gadget, struct s3c_hsudc, gadget);
+}
+
+static inline void set_index(struct s3c_hsudc *hsudc, int ep_addr)
+{
+	ep_addr &= USB_ENDPOINT_NUMBER_MASK;
+	writel(ep_addr, hsudc->regs + S3C_IR);
+}
+
+static inline void __orr32(void __iomem *ptr, u32 val)
+{
+	writel(readl(ptr) | val, ptr);
+}
+
+static void s3c_hsudc_init_phy(void)
+{
+	u32 cfg;
+
+	cfg = readl(S3C2443_PWRCFG) | S3C2443_PWRCFG_USBPHY;
+	writel(cfg, S3C2443_PWRCFG);
+
+	cfg = readl(S3C2443_URSTCON);
+	cfg |= (S3C2443_URSTCON_FUNCRST | S3C2443_URSTCON_PHYRST);
+	writel(cfg, S3C2443_URSTCON);
+	mdelay(1);
+
+	cfg = readl(S3C2443_URSTCON);
+	cfg &= ~(S3C2443_URSTCON_FUNCRST | S3C2443_URSTCON_PHYRST);
+	writel(cfg, S3C2443_URSTCON);
+
+	cfg = readl(S3C2443_PHYCTRL);
+	cfg &= ~(S3C2443_PHYCTRL_CLKSEL | S3C2443_PHYCTRL_DSPORT);
+	cfg |= (S3C2443_PHYCTRL_EXTCLK | S3C2443_PHYCTRL_PLLSEL);
+	writel(cfg, S3C2443_PHYCTRL);
+
+	cfg = readl(S3C2443_PHYPWR);
+	cfg &= ~(S3C2443_PHYPWR_FSUSPEND | S3C2443_PHYPWR_PLL_PWRDN |
+		S3C2443_PHYPWR_XO_ON | S3C2443_PHYPWR_PLL_REFCLK |
+		S3C2443_PHYPWR_ANALOG_PD);
+	cfg |= S3C2443_PHYPWR_COMMON_ON;
+	writel(cfg, S3C2443_PHYPWR);
+
+	cfg = readl(S3C2443_UCLKCON);
+	cfg |= (S3C2443_UCLKCON_DETECT_VBUS | S3C2443_UCLKCON_FUNC_CLKEN |
+		S3C2443_UCLKCON_TCLKEN);
+	writel(cfg, S3C2443_UCLKCON);
+}
+
+static void s3c_hsudc_uninit_phy(void)
+{
+	u32 cfg;
+
+	cfg = readl(S3C2443_PWRCFG) & ~S3C2443_PWRCFG_USBPHY;
+	writel(cfg, S3C2443_PWRCFG);
+
+	writel(S3C2443_PHYPWR_FSUSPEND, S3C2443_PHYPWR);
+
+	cfg = readl(S3C2443_UCLKCON) & ~S3C2443_UCLKCON_FUNC_CLKEN;
+	writel(cfg, S3C2443_UCLKCON);
+}
+
+/**
+ * s3c_hsudc_complete_request - Complete a transfer request.
+ * @hsep: Endpoint to which the request belongs.
+ * @hsreq: Transfer request to be completed.
+ * @status: Transfer completion status for the transfer request.
+ */
+static void s3c_hsudc_complete_request(struct s3c_hsudc_ep *hsep,
+				struct s3c_hsudc_req *hsreq, int status)
+{
+	unsigned int stopped = hsep->stopped;
+	struct s3c_hsudc *hsudc = hsep->dev;
+
+	list_del_init(&hsreq->queue);
+	hsreq->req.status = status;
+
+	if (!ep_index(hsep)) {
+		hsudc->ep0state = WAIT_FOR_SETUP;
+		hsep->bEndpointAddress &= ~USB_DIR_IN;
+	}
+
+	hsep->stopped = 1;
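+	/* drop the controller lock around the completion callback so the
+	 * gadget driver is free to queue new requests from within it */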
+	spin_unlock(&hsudc->lock);
+	if (hsreq->req.complete != NULL)
+		hsreq->req.complete(&hsep->ep, &hsreq->req);
+	spin_lock(&hsudc->lock);
+	hsep->stopped = stopped;
+}
+
+/**
+ * s3c_hsudc_nuke_ep - Terminate all requests queued for an endpoint.
+ * @hsep: Endpoint for which queued requests have to be terminated.
+ * @status: Transfer completion status for the transfer request.
+ */
+static void s3c_hsudc_nuke_ep(struct s3c_hsudc_ep *hsep, int status)
+{
+	struct s3c_hsudc_req *hsreq;
+
+	while (!list_empty(&hsep->queue)) {
+		hsreq = list_entry(hsep->queue.next,
+				struct s3c_hsudc_req, queue);
+		s3c_hsudc_complete_request(hsep, hsreq, status);
+	}
+}
+
+/**
+ * s3c_hsudc_stop_activity - Stop activity on all endpoints.
+ * @hsudc: Device controller for which EP activity is to be stopped.
+ *
+ * All the endpoints are stopped and any pending transfer requests on the
+ * endpoints are terminated.
+ */
+static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc)
+{
+	struct s3c_hsudc_ep *hsep;
+	int epnum;
+
+	hsudc->gadget.speed = USB_SPEED_UNKNOWN;
+
+	for (epnum = 0; epnum < hsudc->pd->epnum; epnum++) {
+		hsep = &hsudc->ep[epnum];
+		hsep->stopped = 1;
+		s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);
+	}
+}
+
+/**
+ * s3c_hsudc_read_setup_pkt - Read the received setup packet from EP0 fifo.
+ * @hsudc: Device controller from which setup packet is to be read.
+ * @buf: The buffer into which the setup packet is read.
+ *
+ * The setup packet received in the EP0 fifo is read and stored into a
+ * given buffer address.
+ */
+
+static void s3c_hsudc_read_setup_pkt(struct s3c_hsudc *hsudc, u16 *buf)
+{
+	int count;
+
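+	/* BRCR gives the number of 16-bit words waiting in the EP0 FIFO */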
+	count = readl(hsudc->regs + S3C_BRCR);
+	while (count--)
+		*buf++ = (u16)readl(hsudc->regs + S3C_BR(0));
+
+	writel(S3C_EP0SR_RX_SUCCESS, hsudc->regs + S3C_EP0SR);
+}
+
+/**
+ * s3c_hsudc_write_fifo - Write next chunk of transfer data to EP fifo.
+ * @hsep: Endpoint to which the data is to be written.
+ * @hsreq: Transfer request from which the next chunk of data is written.
+ *
+ * Write the next chunk of data from a transfer request to the endpoint FIFO.
+ * If the transfer request completes, 1 is returned, otherwise 0 is returned.
+ */
+static int s3c_hsudc_write_fifo(struct s3c_hsudc_ep *hsep,
+				struct s3c_hsudc_req *hsreq)
+{
+	u16 *buf;
+	u32 max = ep_maxpacket(hsep);
+	u32 count, length;
+	bool is_last;
+	void __iomem *fifo = hsep->fifo;
+
+	buf = hsreq->req.buf + hsreq->req.actual;
+	prefetch(buf);
+
+	length = hsreq->req.length - hsreq->req.actual;
+	length = min(length, max);
+	hsreq->req.actual += length;
+
+	writel(length, hsep->dev->regs + S3C_BWCR);
+	for (count = 0; count < length; count += 2)
+		writel(*buf++, fifo);
+
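+	/* a short packet (less than maxpacket) always ends the transfer;
+	 * a full-sized packet ends it only once the whole request has
+	 * been written and no explicit zero-length packet was asked for. */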
+	if (count != max) {
+		is_last = true;
+	} else {
+		if (hsreq->req.length != hsreq->req.actual || hsreq->req.zero)
+			is_last = false;
+		else
+			is_last = true;
+	}
+
+	if (is_last) {
+		s3c_hsudc_complete_request(hsep, hsreq, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * s3c_hsudc_read_fifo - Read the next chunk of data from EP fifo.
+ * @hsep: Endpoint from which the data is to be read.
+ * @hsreq: Transfer request to which the next chunk of data read is written.
+ *
+ * Read the next chunk of data from the endpoint FIFO and write it to the
+ * transfer request buffer. If the transfer request completes, 1 is returned,
+ * otherwise 0 is returned.
+ */
+static int s3c_hsudc_read_fifo(struct s3c_hsudc_ep *hsep,
+				struct s3c_hsudc_req *hsreq)
+{
+	struct s3c_hsudc *hsudc = hsep->dev;
+	u32 csr, offset;
+	u16 *buf, word;
+	u32 buflen, rcnt, rlen;
+	void __iomem *fifo = hsep->fifo;
+	u32 is_short = 0;
+
+	offset = (ep_index(hsep)) ? S3C_ESR : S3C_EP0SR;
+	csr = readl(hsudc->regs + offset);
+	if (!(csr & S3C_ESR_RX_SUCCESS))
+		return -EINVAL;
+
+	buf = hsreq->req.buf + hsreq->req.actual;
+	prefetchw(buf);
+	buflen = hsreq->req.length - hsreq->req.actual;
+
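+	/* BRCR counts received 16-bit words; the Last Word Odd flag means
+	 * only one byte of the final word is valid */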
+	rcnt = readl(hsudc->regs + S3C_BRCR);
+	rlen = (csr & S3C_ESR_LWO) ? (rcnt * 2 - 1) : (rcnt * 2);
+
+	hsreq->req.actual += min(rlen, buflen);
+	is_short = (rlen < hsep->ep.maxpacket);
+
+	while (rcnt-- != 0) {
+		word = (u16)readl(fifo);
+		if (buflen) {
+			*buf++ = word;
+			buflen--;
+		} else {
+			hsreq->req.status = -EOVERFLOW;
+		}
+	}
+
+	writel(S3C_ESR_RX_SUCCESS, hsudc->regs + offset);
+
+	if (is_short || hsreq->req.actual == hsreq->req.length) {
+		s3c_hsudc_complete_request(hsep, hsreq, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
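+/*
+ * Editor's note (illustrative, not part of the original driver): S3C_BRCR
+ * counts 16-bit words, and the "last word odd" flag (S3C_ESR_LWO) marks a
+ * final word carrying only one valid byte.  A 63-byte packet therefore
+ * reads back as rcnt = 32 with LWO set, giving rlen = 32 * 2 - 1 = 63 as
+ * computed above.  A packet shorter than maxpacket completes the request.
+ */
+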
+/**
+ * s3c_hsudc_epin_intr - Handle in-endpoint interrupt.
+ * @hsudc: Device controller for which the interrupt is to be handled.
+ * @ep_idx: Endpoint number on which an interrupt is pending.
+ *
+ * Handles interrupts for an in-endpoint. The interrupts handled are the
+ * stall and data transmit complete interrupts.
+ */
+static void s3c_hsudc_epin_intr(struct s3c_hsudc *hsudc, u32 ep_idx)
+{
+	struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx];
+	struct s3c_hsudc_req *hsreq;
+	u32 csr;
+
+	csr = readl(hsudc->regs + S3C_ESR);
+	if (csr & S3C_ESR_STALL) {
+		writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR);
+		return;
+	}
+
+	if (csr & S3C_ESR_TX_SUCCESS) {
+		writel(S3C_ESR_TX_SUCCESS, hsudc->regs + S3C_ESR);
+		if (list_empty(&hsep->queue))
+			return;
+
+		hsreq = list_entry(hsep->queue.next,
+				struct s3c_hsudc_req, queue);
+		if ((s3c_hsudc_write_fifo(hsep, hsreq) == 0) &&
+				(csr & S3C_ESR_PSIF_TWO))
+			s3c_hsudc_write_fifo(hsep, hsreq);
+	}
+}
+
+/**
+ * s3c_hsudc_epout_intr - Handle out-endpoint interrupt.
+ * @hsudc: Device controller for which the interrupt is to be handled.
+ * @ep_idx: Endpoint number on which an interrupt is pending.
+ *
+ * Handles interrupts for an out-endpoint. The interrupts handled are the
+ * stall, flush and data ready interrupts.
+ */
+static void s3c_hsudc_epout_intr(struct s3c_hsudc *hsudc, u32 ep_idx)
+{
+	struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx];
+	struct s3c_hsudc_req *hsreq;
+	u32 csr;
+
+	csr = readl(hsudc->regs + S3C_ESR);
+	if (csr & S3C_ESR_STALL) {
+		writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR);
+		return;
+	}
+
+	if (csr & S3C_ESR_FLUSH) {
+		__orr32(hsudc->regs + S3C_ECR, S3C_ECR_FLUSH);
+		return;
+	}
+
+	if (csr & S3C_ESR_RX_SUCCESS) {
+		if (list_empty(&hsep->queue))
+			return;
+
+		hsreq = list_entry(hsep->queue.next,
+				struct s3c_hsudc_req, queue);
+		if (((s3c_hsudc_read_fifo(hsep, hsreq)) == 0) &&
+				(csr & S3C_ESR_PSIF_TWO))
+			s3c_hsudc_read_fifo(hsep, hsreq);
+	}
+}
+
+/**
+ * s3c_hsudc_set_halt - Set or clear an endpoint halt.
+ * @_ep: Endpoint on which halt has to be set or cleared.
+ * @value: 1 to set halt on the endpoint, 0 to clear it.
+ *
+ * Set or clear an endpoint halt. If halt is set, the endpoint is stopped.
+ * When halt is cleared on an in-endpoint, any pending transfer request is
+ * started.
+ */
+static int s3c_hsudc_set_halt(struct usb_ep *_ep, int value)
+{
+	struct s3c_hsudc_ep *hsep = our_ep(_ep);
+	struct s3c_hsudc *hsudc = hsep->dev;
+	struct s3c_hsudc_req *hsreq;
+	unsigned long irqflags;
+	u32 ecr;
+	u32 offset;
+
+	if (value && ep_is_in(hsep) && !list_empty(&hsep->queue))
+		return -EAGAIN;
+
+	spin_lock_irqsave(&hsudc->lock, irqflags);
+	set_index(hsudc, ep_index(hsep));
+	offset = (ep_index(hsep)) ? S3C_ECR : S3C_EP0CR;
+	ecr = readl(hsudc->regs + offset);
+
+	if (value) {
+		ecr |= S3C_ECR_STALL;
+		if (ep_index(hsep))
+			ecr |= S3C_ECR_FLUSH;
+		hsep->stopped = 1;
+	} else {
+		ecr &= ~S3C_ECR_STALL;
+		hsep->stopped = hsep->wedge = 0;
+	}
+	writel(ecr, hsudc->regs + offset);
+
+	if (ep_is_in(hsep) && !list_empty(&hsep->queue) && !value) {
+		hsreq = list_entry(hsep->queue.next,
+			struct s3c_hsudc_req, queue);
+		if (hsreq)
+			s3c_hsudc_write_fifo(hsep, hsreq);
+	}
+
+	spin_unlock_irqrestore(&hsudc->lock, irqflags);
+	return 0;
+}
+
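+/*
+ * Usage sketch (editor's note, not part of the original driver): gadget
+ * function drivers reach this operation through the generic endpoint API,
+ * e.g. usb_ep_set_halt(ep) ends up in s3c_hsudc_set_halt(ep, 1) and
+ * usb_ep_clear_halt(ep) in s3c_hsudc_set_halt(ep, 0).  A halt request on
+ * an in-endpoint that still has queued transfers is rejected with -EAGAIN,
+ * as implemented above.
+ */
+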
+/**
+ * s3c_hsudc_set_wedge - Set the halt feature, ignoring clear requests.
+ * @_ep: Endpoint on which the wedge has to be set.
+ *
+ * Sets the halt feature and marks the endpoint so that subsequent clear
+ * halt requests from the host are ignored.
+ */
+static int s3c_hsudc_set_wedge(struct usb_ep *_ep)
+{
+	struct s3c_hsudc_ep *hsep = our_ep(_ep);
+
+	if (!hsep)
+		return -EINVAL;
+
+	hsep->wedge = 1;
+	return usb_ep_set_halt(_ep);
+}
+
+/**
+ * s3c_hsudc_handle_reqfeat - Handle set feature or clear feature requests.
+ * @hsudc: Device controller on which the set/clear feature is to be handled.
+ * @ctrl: Control request as received on endpoint 0.
+ *
+ * Handle set feature or clear feature control requests on the control endpoint.
+ */
+static int s3c_hsudc_handle_reqfeat(struct s3c_hsudc *hsudc,
+					struct usb_ctrlrequest *ctrl)
+{
+	struct s3c_hsudc_ep *hsep;
+	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
+	u8 ep_num = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK;
+
+	if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
+		hsep = &hsudc->ep[ep_num];
+		switch (le16_to_cpu(ctrl->wValue)) {
+		case USB_ENDPOINT_HALT:
+			if (set || (!set && !hsep->wedge))
+				s3c_hsudc_set_halt(&hsep->ep, set);
+			return 0;
+		}
+	}
+
+	return -ENOENT;
+}
+
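+/*
+ * Editor's note: a wedged endpoint deliberately ignores the host's
+ * ClearFeature(ENDPOINT_HALT), so only SetFeature (or a driver-initiated
+ * clear) reaches s3c_hsudc_set_halt() above.  The target endpoint number
+ * is taken from the low bits of wIndex, per chapter 9 of the USB 2.0
+ * specification.
+ */
+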
+/**
+ * s3c_hsudc_process_req_status - Handle get status control request.
+ * @hsudc: Device controller on which the get status request has to be handled.
+ * @ctrl: Control request as received on endpoint 0.
+ *
+ * Handle a get status control request received on the control endpoint.
+ */
+static void s3c_hsudc_process_req_status(struct s3c_hsudc *hsudc,
+					struct usb_ctrlrequest *ctrl)
+{
+	struct s3c_hsudc_ep *hsep0 = &hsudc->ep[0];
+	struct s3c_hsudc_req hsreq;
+	struct s3c_hsudc_ep *hsep;
+	__le16 reply;
+	u8 epnum;
+
+	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		reply = cpu_to_le16(0);
+		break;
+
+	case USB_RECIP_INTERFACE:
+		reply = cpu_to_le16(0);
+		break;
+
+	case USB_RECIP_ENDPOINT:
+		epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+		hsep = &hsudc->ep[epnum];
+		reply = cpu_to_le16(hsep->stopped ? 1 : 0);
+		break;
+	}
+
+	INIT_LIST_HEAD(&hsreq.queue);
+	hsreq.req.length = 2;
+	hsreq.req.buf = &reply;
+	hsreq.req.actual = 0;
+	hsreq.req.complete = NULL;
+	s3c_hsudc_write_fifo(hsep0, &hsreq);
+}
+
+/**
+ * s3c_hsudc_process_setup - Process control request received on endpoint 0.
+ * @hsudc: Device controller on which control request has been received.
+ *
+ * Read the control request received on endpoint 0, decode it and handle
+ * the request.
+ */
+static void s3c_hsudc_process_setup(struct s3c_hsudc *hsudc)
+{
+	struct s3c_hsudc_ep *hsep = &hsudc->ep[0];
+	struct usb_ctrlrequest ctrl = {0};
+	int ret;
+
+	s3c_hsudc_nuke_ep(hsep, -EPROTO);
+	s3c_hsudc_read_setup_pkt(hsudc, (u16 *)&ctrl);
+
+	if (ctrl.bRequestType & USB_DIR_IN) {
+		hsep->bEndpointAddress |= USB_DIR_IN;
+		hsudc->ep0state = DATA_STATE_XMIT;
+	} else {
+		hsep->bEndpointAddress &= ~USB_DIR_IN;
+		hsudc->ep0state = DATA_STATE_RECV;
+	}
+
+	switch (ctrl.bRequest) {
+	case USB_REQ_SET_ADDRESS:
+		if (ctrl.bRequestType != (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
+			break;
+		hsudc->ep0state = WAIT_FOR_SETUP;
+		return;
+
+	case USB_REQ_GET_STATUS:
+		if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
+			break;
+		s3c_hsudc_process_req_status(hsudc, &ctrl);
+		return;
+
+	case USB_REQ_SET_FEATURE:
+	case USB_REQ_CLEAR_FEATURE:
+		if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
+			break;
+		s3c_hsudc_handle_reqfeat(hsudc, &ctrl);
+		hsudc->ep0state = WAIT_FOR_SETUP;
+		return;
+	}
+
+	if (hsudc->driver) {
+		spin_unlock(&hsudc->lock);
+		ret = hsudc->driver->setup(&hsudc->gadget, &ctrl);
+		spin_lock(&hsudc->lock);
+
+		if (ctrl.bRequest == USB_REQ_SET_CONFIGURATION) {
+			hsep->bEndpointAddress &= ~USB_DIR_IN;
+			hsudc->ep0state = WAIT_FOR_SETUP;
+		}
+
+		if (ret < 0) {
+			dev_err(hsudc->dev, "setup failed, returned %d\n",
+						ret);
+			s3c_hsudc_set_halt(&hsep->ep, 1);
+			hsudc->ep0state = WAIT_FOR_SETUP;
+			hsep->bEndpointAddress &= ~USB_DIR_IN;
+		}
+	}
+}
+
+/**
+ * s3c_hsudc_handle_ep0_intr - Handle endpoint 0 interrupt.
+ * @hsudc: Device controller on which an endpoint 0 interrupt has occurred.
+ *
+ * Handle an endpoint 0 interrupt when it occurs. An EP0 interrupt can occur
+ * when a stall handshake is sent to the host or when data is sent/received
+ * on endpoint 0.
+ */
+static void s3c_hsudc_handle_ep0_intr(struct s3c_hsudc *hsudc)
+{
+	struct s3c_hsudc_ep *hsep = &hsudc->ep[0];
+	struct s3c_hsudc_req *hsreq;
+	u32 csr = readl(hsudc->regs + S3C_EP0SR);
+	u32 ecr;
+
+	if (csr & S3C_EP0SR_STALL) {
+		ecr = readl(hsudc->regs + S3C_EP0CR);
+		ecr &= ~(S3C_ECR_STALL | S3C_ECR_FLUSH);
+		writel(ecr, hsudc->regs + S3C_EP0CR);
+
+		writel(S3C_EP0SR_STALL, hsudc->regs + S3C_EP0SR);
+		hsep->stopped = 0;
+
+		s3c_hsudc_nuke_ep(hsep, -ECONNABORTED);
+		hsudc->ep0state = WAIT_FOR_SETUP;
+		hsep->bEndpointAddress &= ~USB_DIR_IN;
+		return;
+	}
+
+	if (csr & S3C_EP0SR_TX_SUCCESS) {
+		writel(S3C_EP0SR_TX_SUCCESS, hsudc->regs + S3C_EP0SR);
+		if (ep_is_in(hsep)) {
+			if (list_empty(&hsep->queue))
+				return;
+
+			hsreq = list_entry(hsep->queue.next,
+					struct s3c_hsudc_req, queue);
+			s3c_hsudc_write_fifo(hsep, hsreq);
+		}
+	}
+
+	if (csr & S3C_EP0SR_RX_SUCCESS) {
+		if (hsudc->ep0state == WAIT_FOR_SETUP)
+			s3c_hsudc_process_setup(hsudc);
+		else {
+			if (!ep_is_in(hsep)) {
+				if (list_empty(&hsep->queue))
+					return;
+				hsreq = list_entry(hsep->queue.next,
+					struct s3c_hsudc_req, queue);
+				s3c_hsudc_read_fifo(hsep, hsreq);
+			}
+		}
+	}
+}
+
+/**
+ * s3c_hsudc_ep_enable - Enable an endpoint.
+ * @_ep: The endpoint to be enabled.
+ * @desc: Endpoint descriptor.
+ *
+ * Enables an endpoint when called from the gadget driver. Any endpoint stall
+ * is cleared, the transfer type is configured and the endpoint interrupt is
+ * enabled.
+ */
+static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
+				const struct usb_endpoint_descriptor *desc)
+{
+	struct s3c_hsudc_ep *hsep;
+	struct s3c_hsudc *hsudc;
+	unsigned long flags;
+	u32 ecr = 0;
+
+	hsep = our_ep(_ep);
+	if (!_ep || !desc || hsep->desc || _ep->name == ep0name
+		|| desc->bDescriptorType != USB_DT_ENDPOINT
+		|| hsep->bEndpointAddress != desc->bEndpointAddress
+		|| ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
+		return -EINVAL;
+
+	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
+		&& usb_endpoint_maxp(desc) != ep_maxpacket(hsep))
+		|| !desc->wMaxPacketSize)
+		return -ERANGE;
+
+	hsudc = hsep->dev;
+	if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	spin_lock_irqsave(&hsudc->lock, flags);
+
+	set_index(hsudc, hsep->bEndpointAddress);
+	ecr |= ((usb_endpoint_xfer_int(desc)) ? S3C_ECR_IEMS : S3C_ECR_DUEN);
+	writel(ecr, hsudc->regs + S3C_ECR);
+
+	hsep->stopped = hsep->wedge = 0;
+	hsep->desc = desc;
+	hsep->ep.maxpacket = usb_endpoint_maxp(desc);
+
+	s3c_hsudc_set_halt(_ep, 0);
+	__set_bit(ep_index(hsep), hsudc->regs + S3C_EIER);
+
+	spin_unlock_irqrestore(&hsudc->lock, flags);
+	return 0;
+}
+
+/**
+ * s3c_hsudc_ep_disable - Disable an endpoint.
+ * @_ep: The endpoint to be disabled.
+ *
+ * Disables an endpoint when called from the gadget driver.
+ */
+static int s3c_hsudc_ep_disable(struct usb_ep *_ep)
+{
+	struct s3c_hsudc_ep *hsep = our_ep(_ep);
+	struct s3c_hsudc *hsudc = hsep->dev;
+	unsigned long flags;
+
+	if (!_ep || !hsep->desc)
+		return -EINVAL;
+
+	spin_lock_irqsave(&hsudc->lock, flags);
+
+	set_index(hsudc, hsep->bEndpointAddress);
+	__clear_bit(ep_index(hsep), hsudc->regs + S3C_EIER);
+
+	s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);
+
+	hsep->desc = NULL;
+	hsep->ep.desc = NULL;
+	hsep->stopped = 1;
+
+	spin_unlock_irqrestore(&hsudc->lock, flags);
+	return 0;
+}
+
+/**
+ * s3c_hsudc_alloc_request - Allocate a new request.
+ * @_ep: Endpoint for which request is allocated (not used).
+ * @gfp_flags: Flags used for the allocation.
+ *
+ * Allocates a single transfer request structure when called from gadget driver.
+ */
+static struct usb_request *s3c_hsudc_alloc_request(struct usb_ep *_ep,
+						gfp_t gfp_flags)
+{
+	struct s3c_hsudc_req *hsreq;
+
+	hsreq = kzalloc(sizeof *hsreq, gfp_flags);
+	if (!hsreq)
+		return NULL;
+
+	INIT_LIST_HEAD(&hsreq->queue);
+	return &hsreq->req;
+}
+
+/**
+ * s3c_hsudc_free_request - Deallocate a request.
+ * @ep: Endpoint for which request is deallocated (not used).
+ * @_req: Request to be deallocated.
+ *
+ * Frees a transfer request structure previously allocated with
+ * s3c_hsudc_alloc_request() when called from the gadget driver.
+ */
+static void s3c_hsudc_free_request(struct usb_ep *ep, struct usb_request *_req)
+{
+	struct s3c_hsudc_req *hsreq;
+
+	hsreq = our_req(_req);
+	WARN_ON(!list_empty(&hsreq->queue));
+	kfree(hsreq);
+}
+
+/**
+ * s3c_hsudc_queue - Queue a transfer request for the endpoint.
+ * @_ep: Endpoint for which the request is queued.
+ * @_req: Request to be queued.
+ * @gfp_flags: Not used.
+ *
+ * Start or enqueue a request for an endpoint when called from the gadget driver.
+ */
+static int s3c_hsudc_queue(struct usb_ep *_ep, struct usb_request *_req,
+			gfp_t gfp_flags)
+{
+	struct s3c_hsudc_req *hsreq;
+	struct s3c_hsudc_ep *hsep;
+	struct s3c_hsudc *hsudc;
+	unsigned long flags;
+	u32 offset;
+	u32 csr;
+
+	hsreq = our_req(_req);
+	if ((!_req || !_req->complete || !_req->buf ||
+		!list_empty(&hsreq->queue)))
+		return -EINVAL;
+
+	hsep = our_ep(_ep);
+	hsudc = hsep->dev;
+	if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	spin_lock_irqsave(&hsudc->lock, flags);
+	set_index(hsudc, hsep->bEndpointAddress);
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	if (!ep_index(hsep) && _req->length == 0) {
+		hsudc->ep0state = WAIT_FOR_SETUP;
+		s3c_hsudc_complete_request(hsep, hsreq, 0);
+		spin_unlock_irqrestore(&hsudc->lock, flags);
+		return 0;
+	}
+
+	if (list_empty(&hsep->queue) && !hsep->stopped) {
+		offset = (ep_index(hsep)) ? S3C_ESR : S3C_EP0SR;
+		if (ep_is_in(hsep)) {
+			csr = readl(hsudc->regs + offset);
+			if (!(csr & S3C_ESR_TX_SUCCESS) &&
+				(s3c_hsudc_write_fifo(hsep, hsreq) == 1))
+				hsreq = NULL;
+		} else {
+			csr = readl(hsudc->regs + offset);
+			if ((csr & S3C_ESR_RX_SUCCESS)
+				   && (s3c_hsudc_read_fifo(hsep, hsreq) == 1))
+				hsreq = NULL;
+		}
+	}
+
+	if (hsreq)
+		list_add_tail(&hsreq->queue, &hsep->queue);
+
+	spin_unlock_irqrestore(&hsudc->lock, flags);
+	return 0;
+}
+
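+/*
+ * Usage sketch (editor's note, hypothetical names): a gadget function
+ * driver submits transfers to this operation through the generic request
+ * API, roughly:
+ *
+ *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ *
+ *	req->buf = my_buf;
+ *	req->length = my_len;
+ *	req->complete = my_complete;
+ *	usb_ep_queue(ep, req, GFP_ATOMIC);
+ *
+ * If the endpoint is idle the transfer is started from here immediately;
+ * otherwise the request is appended to hsep->queue and serviced later by
+ * the endpoint interrupt handlers above.
+ */
+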
+/**
+ * s3c_hsudc_dequeue - Dequeue a transfer request from an endpoint.
+ * @_ep: Endpoint from which the request is dequeued.
+ * @_req: Request to be dequeued.
+ *
+ * Dequeue a request from an endpoint when called from the gadget driver.
+ */
+static int s3c_hsudc_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct s3c_hsudc_ep *hsep = our_ep(_ep);
+	struct s3c_hsudc *hsudc = hsep->dev;
+	struct s3c_hsudc_req *hsreq;
+	unsigned long flags;
+
+	if (!_ep || hsep->ep.name == ep0name)
+		return -EINVAL;
+
+	spin_lock_irqsave(&hsudc->lock, flags);
+
+	list_for_each_entry(hsreq, &hsep->queue, queue) {
+		if (&hsreq->req == _req)
+			break;
+	}
+	if (&hsreq->req != _req) {
+		spin_unlock_irqrestore(&hsudc->lock, flags);
+		return -EINVAL;
+	}
+
+	set_index(hsudc, hsep->bEndpointAddress);
+	s3c_hsudc_complete_request(hsep, hsreq, -ECONNRESET);
+
+	spin_unlock_irqrestore(&hsudc->lock, flags);
+	return 0;
+}
+
+static struct usb_ep_ops s3c_hsudc_ep_ops = {
+	.enable = s3c_hsudc_ep_enable,
+	.disable = s3c_hsudc_ep_disable,
+	.alloc_request = s3c_hsudc_alloc_request,
+	.free_request = s3c_hsudc_free_request,
+	.queue = s3c_hsudc_queue,
+	.dequeue = s3c_hsudc_dequeue,
+	.set_halt = s3c_hsudc_set_halt,
+	.set_wedge = s3c_hsudc_set_wedge,
+};
+
+/**
+ * s3c_hsudc_initep - Initialize an endpoint to its default state.
+ * @hsudc: Reference to the device controller.
+ * @hsep: Endpoint to be initialized.
+ * @epnum: Address to be assigned to the endpoint.
+ *
+ * Initialize an endpoint with the default configuration.
+ */
+static void s3c_hsudc_initep(struct s3c_hsudc *hsudc,
+				struct s3c_hsudc_ep *hsep, int epnum)
+{
+	char *dir;
+
+	if ((epnum % 2) == 0) {
+		dir = "out";
+	} else {
+		dir = "in";
+		hsep->bEndpointAddress = USB_DIR_IN;
+	}
+
+	hsep->bEndpointAddress |= epnum;
+	if (epnum)
+		snprintf(hsep->name, sizeof(hsep->name), "ep%d%s", epnum, dir);
+	else
+		snprintf(hsep->name, sizeof(hsep->name), "%s", ep0name);
+
+	INIT_LIST_HEAD(&hsep->queue);
+	INIT_LIST_HEAD(&hsep->ep.ep_list);
+	if (epnum)
+		list_add_tail(&hsep->ep.ep_list, &hsudc->gadget.ep_list);
+
+	hsep->dev = hsudc;
+	hsep->ep.name = hsep->name;
+	hsep->ep.maxpacket = epnum ? 512 : 64;
+	hsep->ep.ops = &s3c_hsudc_ep_ops;
+	hsep->fifo = hsudc->regs + S3C_BR(epnum);
+	hsep->desc = NULL;
+	hsep->ep.desc = NULL;
+	hsep->stopped = 0;
+	hsep->wedge = 0;
+
+	set_index(hsudc, epnum);
+	writel(hsep->ep.maxpacket, hsudc->regs + S3C_MPR);
+}
+
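+/*
+ * Editor's note (summary of the scheme above, not part of the original
+ * driver): even endpoint indices become OUT endpoints and odd indices IN
+ * endpoints, so the endpoints are named "ep0" (ep0name), "ep1in",
+ * "ep2out", "ep3in", ... with a default maxpacket of 64 bytes for ep0 and
+ * 512 bytes for the others until a descriptor is bound in
+ * s3c_hsudc_ep_enable().
+ */
+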
+/**
+ * s3c_hsudc_setup_ep - Configure all endpoints to default state.
+ * @hsudc: Reference to device controller.
+ *
+ * Configures all endpoints to default state.
+ */
+static void s3c_hsudc_setup_ep(struct s3c_hsudc *hsudc)
+{
+	int epnum;
+
+	hsudc->ep0state = WAIT_FOR_SETUP;
+	INIT_LIST_HEAD(&hsudc->gadget.ep_list);
+	for (epnum = 0; epnum < hsudc->pd->epnum; epnum++)
+		s3c_hsudc_initep(hsudc, &hsudc->ep[epnum], epnum);
+}
+
+/**
+ * s3c_hsudc_reconfig - Reconfigure the device controller to default state.
+ * @hsudc: Reference to device controller.
+ *
+ * Reconfigures the device controller registers to a default state.
+ */
+static void s3c_hsudc_reconfig(struct s3c_hsudc *hsudc)
+{
+	writel(0xAA, hsudc->regs + S3C_EDR);
+	writel(1, hsudc->regs + S3C_EIER);
+	writel(0, hsudc->regs + S3C_TR);
+	writel(S3C_SCR_DTZIEN_EN | S3C_SCR_RRD_EN | S3C_SCR_SUS_EN |
+			S3C_SCR_RST_EN, hsudc->regs + S3C_SCR);
+	writel(0, hsudc->regs + S3C_EP0CR);
+
+	s3c_hsudc_setup_ep(hsudc);
+}
+
+/**
+ * s3c_hsudc_irq - Interrupt handler for device controller.
+ * @irq: Not used.
+ * @_dev: Reference to the device controller.
+ *
+ * Interrupt handler for the device controller. This handler handles controller
+ * interrupts and endpoint interrupts.
+ */
+static irqreturn_t s3c_hsudc_irq(int irq, void *_dev)
+{
+	struct s3c_hsudc *hsudc = _dev;
+	struct s3c_hsudc_ep *hsep;
+	u32 ep_intr;
+	u32 sys_status;
+	u32 ep_idx;
+
+	spin_lock(&hsudc->lock);
+
+	sys_status = readl(hsudc->regs + S3C_SSR);
+	ep_intr = readl(hsudc->regs + S3C_EIR) & 0x3FF;
+
+	if (!ep_intr && !(sys_status & S3C_SSR_DTZIEN_EN)) {
+		spin_unlock(&hsudc->lock);
+		return IRQ_HANDLED;
+	}
+
+	if (sys_status) {
+		if (sys_status & S3C_SSR_VBUSON)
+			writel(S3C_SSR_VBUSON, hsudc->regs + S3C_SSR);
+
+		if (sys_status & S3C_SSR_ERR)
+			writel(S3C_SSR_ERR, hsudc->regs + S3C_SSR);
+
+		if (sys_status & S3C_SSR_SDE) {
+			writel(S3C_SSR_SDE, hsudc->regs + S3C_SSR);
+			hsudc->gadget.speed = (sys_status & S3C_SSR_HSP) ?
+				USB_SPEED_HIGH : USB_SPEED_FULL;
+		}
+
+		if (sys_status & S3C_SSR_SUSPEND) {
+			writel(S3C_SSR_SUSPEND, hsudc->regs + S3C_SSR);
+			if (hsudc->gadget.speed != USB_SPEED_UNKNOWN
+				&& hsudc->driver && hsudc->driver->suspend)
+				hsudc->driver->suspend(&hsudc->gadget);
+		}
+
+		if (sys_status & S3C_SSR_RESUME) {
+			writel(S3C_SSR_RESUME, hsudc->regs + S3C_SSR);
+			if (hsudc->gadget.speed != USB_SPEED_UNKNOWN
+				&& hsudc->driver && hsudc->driver->resume)
+				hsudc->driver->resume(&hsudc->gadget);
+		}
+
+		if (sys_status & S3C_SSR_RESET) {
+			writel(S3C_SSR_RESET, hsudc->regs + S3C_SSR);
+			for (ep_idx = 0; ep_idx < hsudc->pd->epnum; ep_idx++) {
+				hsep = &hsudc->ep[ep_idx];
+				hsep->stopped = 1;
+				s3c_hsudc_nuke_ep(hsep, -ECONNRESET);
+			}
+			s3c_hsudc_reconfig(hsudc);
+			hsudc->ep0state = WAIT_FOR_SETUP;
+		}
+	}
+
+	if (ep_intr & S3C_EIR_EP0) {
+		writel(S3C_EIR_EP0, hsudc->regs + S3C_EIR);
+		set_index(hsudc, 0);
+		s3c_hsudc_handle_ep0_intr(hsudc);
+	}
+
+	ep_intr >>= 1;
+	ep_idx = 1;
+	while (ep_intr) {
+		if (ep_intr & 1)  {
+			hsep = &hsudc->ep[ep_idx];
+			set_index(hsudc, ep_idx);
+			writel(1 << ep_idx, hsudc->regs + S3C_EIR);
+			if (ep_is_in(hsep))
+				s3c_hsudc_epin_intr(hsudc, ep_idx);
+			else
+				s3c_hsudc_epout_intr(hsudc, ep_idx);
+		}
+		ep_intr >>= 1;
+		ep_idx++;
+	}
+
+	spin_unlock(&hsudc->lock);
+	return IRQ_HANDLED;
+}
+
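+/*
+ * Editor's note (illustrative, not part of the original driver): S3C_EIR
+ * holds one pending bit per endpoint, bit 0 being endpoint 0.  A value of
+ * 0x05, for example, means EP0 and EP2 both have work pending: EP0 is
+ * dispatched first through s3c_hsudc_handle_ep0_intr(), then the mask is
+ * shifted right and the remaining bits are walked, acknowledging each one
+ * by writing its bit back to S3C_EIR before servicing the endpoint.
+ */
+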
+static int s3c_hsudc_start(struct usb_gadget *gadget,
+		struct usb_gadget_driver *driver)
+{
+	struct s3c_hsudc *hsudc = to_hsudc(gadget);
+	int ret;
+
+	if (!driver
+		|| driver->max_speed < USB_SPEED_FULL
+		|| !driver->setup)
+		return -EINVAL;
+
+	if (!hsudc)
+		return -ENODEV;
+
+	if (hsudc->driver)
+		return -EBUSY;
+
+	hsudc->driver = driver;
+	hsudc->gadget.dev.driver = &driver->driver;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(hsudc->supplies),
+				    hsudc->supplies);
+	if (ret != 0) {
+		dev_err(hsudc->dev, "failed to enable supplies: %d\n", ret);
+		goto err_supplies;
+	}
+
+	/* connect to bus through transceiver */
+	if (hsudc->transceiver) {
+		ret = otg_set_peripheral(hsudc->transceiver->otg,
+					&hsudc->gadget);
+		if (ret) {
+			dev_err(hsudc->dev, "%s: can't bind to transceiver\n",
+					hsudc->gadget.name);
+			goto err_otg;
+		}
+	}
+
+	enable_irq(hsudc->irq);
+	dev_info(hsudc->dev, "bound driver %s\n", driver->driver.name);
+
+	s3c_hsudc_reconfig(hsudc);
+
+	pm_runtime_get_sync(hsudc->dev);
+
+	s3c_hsudc_init_phy();
+	if (hsudc->pd->gpio_init)
+		hsudc->pd->gpio_init();
+
+	return 0;
+err_otg:
+	regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+err_supplies:
+	hsudc->driver = NULL;
+	hsudc->gadget.dev.driver = NULL;
+	return ret;
+}
+
+static int s3c_hsudc_stop(struct usb_gadget *gadget,
+		struct usb_gadget_driver *driver)
+{
+	struct s3c_hsudc *hsudc = to_hsudc(gadget);
+	unsigned long flags;
+
+	if (!hsudc)
+		return -ENODEV;
+
+	if (!driver || driver != hsudc->driver)
+		return -EINVAL;
+
+	spin_lock_irqsave(&hsudc->lock, flags);
+	hsudc->driver = NULL;
+	hsudc->gadget.dev.driver = NULL;
+	hsudc->gadget.speed = USB_SPEED_UNKNOWN;
+	s3c_hsudc_uninit_phy();
+
+	pm_runtime_put(hsudc->dev);
+
+	if (hsudc->pd->gpio_uninit)
+		hsudc->pd->gpio_uninit();
+	s3c_hsudc_stop_activity(hsudc);
+	spin_unlock_irqrestore(&hsudc->lock, flags);
+
+	if (hsudc->transceiver)
+		(void) otg_set_peripheral(hsudc->transceiver->otg, NULL);
+
+	disable_irq(hsudc->irq);
+
+	regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+
+	dev_info(hsudc->dev, "unregistered gadget driver '%s'\n",
+			driver->driver.name);
+	return 0;
+}
+
+static inline u32 s3c_hsudc_read_frameno(struct s3c_hsudc *hsudc)
+{
+	return readl(hsudc->regs + S3C_FNR) & 0x3FF;
+}
+
+static int s3c_hsudc_gadget_getframe(struct usb_gadget *gadget)
+{
+	return s3c_hsudc_read_frameno(to_hsudc(gadget));
+}
+
+static int s3c_hsudc_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+	struct s3c_hsudc *hsudc = to_hsudc(gadget);
+
+	if (!hsudc)
+		return -ENODEV;
+
+	if (hsudc->transceiver)
+		return usb_phy_set_power(hsudc->transceiver, mA);
+
+	return -EOPNOTSUPP;
+}
+
+static struct usb_gadget_ops s3c_hsudc_gadget_ops = {
+	.get_frame	= s3c_hsudc_gadget_getframe,
+	.udc_start	= s3c_hsudc_start,
+	.udc_stop	= s3c_hsudc_stop,
+	.vbus_draw	= s3c_hsudc_vbus_draw,
+};
+
+static int __devinit s3c_hsudc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct s3c_hsudc *hsudc;
+	struct s3c24xx_hsudc_platdata *pd = pdev->dev.platform_data;
+	int ret, i;
+
+	hsudc = kzalloc(sizeof(struct s3c_hsudc) +
+			sizeof(struct s3c_hsudc_ep) * pd->epnum,
+			GFP_KERNEL);
+	if (!hsudc) {
+		dev_err(dev, "cannot allocate memory\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, dev);
+	hsudc->dev = dev;
+	hsudc->pd = pdev->dev.platform_data;
+
+	hsudc->transceiver = usb_get_transceiver();
+
+	for (i = 0; i < ARRAY_SIZE(hsudc->supplies); i++)
+		hsudc->supplies[i].supply = s3c_hsudc_supply_names[i];
+
+	ret = regulator_bulk_get(dev, ARRAY_SIZE(hsudc->supplies),
+				 hsudc->supplies);
+	if (ret != 0) {
+		dev_err(dev, "failed to request supplies: %d\n", ret);
+		goto err_supplies;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "unable to obtain driver resource data\n");
+		ret = -ENODEV;
+		goto err_res;
+	}
+
+	hsudc->mem_rsrc = request_mem_region(res->start, resource_size(res),
+				dev_name(&pdev->dev));
+	if (!hsudc->mem_rsrc) {
+		dev_err(dev, "failed to reserve register area\n");
+		ret = -ENODEV;
+		goto err_res;
+	}
+
+	hsudc->regs = ioremap(res->start, resource_size(res));
+	if (!hsudc->regs) {
+		dev_err(dev, "error mapping device register area\n");
+		ret = -EBUSY;
+		goto err_remap;
+	}
+
+	spin_lock_init(&hsudc->lock);
+
+	dev_set_name(&hsudc->gadget.dev, "gadget");
+
+	hsudc->gadget.max_speed = USB_SPEED_HIGH;
+	hsudc->gadget.ops = &s3c_hsudc_gadget_ops;
+	hsudc->gadget.name = dev_name(dev);
+	hsudc->gadget.dev.parent = dev;
+	hsudc->gadget.dev.dma_mask = dev->dma_mask;
+	hsudc->gadget.ep0 = &hsudc->ep[0].ep;
+
+	hsudc->gadget.is_otg = 0;
+	hsudc->gadget.is_a_peripheral = 0;
+	hsudc->gadget.speed = USB_SPEED_UNKNOWN;
+
+	s3c_hsudc_setup_ep(hsudc);
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_err(dev, "unable to obtain IRQ number\n");
+		goto err_irq;
+	}
+	hsudc->irq = ret;
+
+	ret = request_irq(hsudc->irq, s3c_hsudc_irq, 0, driver_name, hsudc);
+	if (ret < 0) {
+		dev_err(dev, "irq request failed\n");
+		goto err_irq;
+	}
+
+	hsudc->uclk = clk_get(&pdev->dev, "usb-device");
+	if (IS_ERR(hsudc->uclk)) {
+		dev_err(dev, "failed to find usb-device clock source\n");
+		ret = PTR_ERR(hsudc->uclk);
+		goto err_clk;
+	}
+	clk_enable(hsudc->uclk);
+
+	local_irq_disable();
+
+	disable_irq(hsudc->irq);
+	local_irq_enable();
+
+	ret = device_register(&hsudc->gadget.dev);
+	if (ret) {
+		put_device(&hsudc->gadget.dev);
+		goto err_add_device;
+	}
+
+	ret = usb_add_gadget_udc(&pdev->dev, &hsudc->gadget);
+	if (ret)
+		goto err_add_udc;
+
+	pm_runtime_enable(dev);
+
+	return 0;
+err_add_udc:
+	device_unregister(&hsudc->gadget.dev);
+err_add_device:
+	clk_disable(hsudc->uclk);
+	clk_put(hsudc->uclk);
+err_clk:
+	free_irq(hsudc->irq, hsudc);
+err_irq:
+	iounmap(hsudc->regs);
+
+err_remap:
+	release_mem_region(res->start, resource_size(res));
+err_res:
+	if (hsudc->transceiver)
+		usb_put_transceiver(hsudc->transceiver);
+
+	regulator_bulk_free(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+err_supplies:
+	kfree(hsudc);
+	return ret;
+}
+
+static struct platform_driver s3c_hsudc_driver = {
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "s3c-hsudc",
+	},
+	.probe		= s3c_hsudc_probe,
+};
+
+module_platform_driver(s3c_hsudc_driver);
+
+MODULE_DESCRIPTION("Samsung S3C24XX USB high-speed controller driver");
+MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s3c-hsudc");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/s3c2410_udc.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/s3c2410_udc.c
new file mode 100644
index 0000000..195524c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/s3c2410_udc.c
@@ -0,0 +1,2106 @@
+/*
+ * linux/drivers/usb/gadget/s3c2410_udc.c
+ *
+ * Samsung S3C24xx series on-chip full speed USB device controllers
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
+ *	Additional cleanups by Ben Dooks <ben-linux@fluff.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/prefetch.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <linux/usb.h>
+#include <linux/usb/gadget.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+#include <mach/irqs.h>
+
+#include <mach/hardware.h>
+
+#include <plat/regs-udc.h>
+#include <plat/udc.h>
+
+
+#include "s3c2410_udc.h"
+
+#define DRIVER_DESC	"S3C2410 USB Device Controller Gadget"
+#define DRIVER_VERSION	"29 Apr 2007"
+#define DRIVER_AUTHOR	"Herbert Pötzl <herbert@13thfloor.at>, " \
+			"Arnaud Patard <arnaud.patard@rtp-net.org>"
+
+static const char		gadget_name[] = "s3c2410_udc";
+static const char		driver_desc[] = DRIVER_DESC;
+
+static struct s3c2410_udc	*the_controller;
+static struct clk		*udc_clock;
+static struct clk		*usb_bus_clock;
+static void __iomem		*base_addr;
+static u64			rsrc_start;
+static u64			rsrc_len;
+static struct dentry		*s3c2410_udc_debugfs_root;
+
+static inline u32 udc_read(u32 reg)
+{
+	return readb(base_addr + reg);
+}
+
+static inline void udc_write(u32 value, u32 reg)
+{
+	writeb(value, base_addr + reg);
+}
+
+static inline void udc_writeb(void __iomem *base, u32 value, u32 reg)
+{
+	writeb(value, base + reg);
+}
+
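+/*
+ * Editor's note: the S3C2410 UDC registers are 8 bits wide, so these
+ * helpers use readb()/writeb() even though they take u32 values.  Most
+ * per-endpoint registers are banked and must be selected by writing the
+ * endpoint number to S3C2410_UDC_INDEX_REG first, which is why the code
+ * below rewrites the index register before nearly every access.
+ */
+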
+static struct s3c2410_udc_mach_info *udc_info;
+
+/*************************** DEBUG FUNCTION ***************************/
+#define DEBUG_NORMAL	1
+#define DEBUG_VERBOSE	2
+
+#ifdef CONFIG_USB_S3C2410_DEBUG
+#define USB_S3C2410_DEBUG_LEVEL 0
+
+static uint32_t s3c2410_ticks = 0;
+
+static int dprintk(int level, const char *fmt, ...)
+{
+	static char printk_buf[1024];
+	static long prevticks;
+	static int invocation;
+	va_list args;
+	int len;
+
+	if (level > USB_S3C2410_DEBUG_LEVEL)
+		return 0;
+
+	if (s3c2410_ticks != prevticks) {
+		prevticks = s3c2410_ticks;
+		invocation = 0;
+	}
+
+	len = scnprintf(printk_buf,
+			sizeof(printk_buf), "%1lu.%02d USB: ",
+			prevticks, invocation++);
+
+	va_start(args, fmt);
+	len = vscnprintf(printk_buf+len,
+			sizeof(printk_buf)-len, fmt, args);
+	va_end(args);
+
+	return printk(KERN_DEBUG "%s", printk_buf);
+}
+#else
+static int dprintk(int level, const char *fmt, ...)
+{
+	return 0;
+}
+#endif
+static int s3c2410_udc_debugfs_seq_show(struct seq_file *m, void *p)
+{
+	u32 addr_reg,pwr_reg,ep_int_reg,usb_int_reg;
+	u32 ep_int_en_reg, usb_int_en_reg, ep0_csr;
+	u32 ep1_i_csr1,ep1_i_csr2,ep1_o_csr1,ep1_o_csr2;
+	u32 ep2_i_csr1,ep2_i_csr2,ep2_o_csr1,ep2_o_csr2;
+
+	addr_reg       = udc_read(S3C2410_UDC_FUNC_ADDR_REG);
+	pwr_reg        = udc_read(S3C2410_UDC_PWR_REG);
+	ep_int_reg     = udc_read(S3C2410_UDC_EP_INT_REG);
+	usb_int_reg    = udc_read(S3C2410_UDC_USB_INT_REG);
+	ep_int_en_reg  = udc_read(S3C2410_UDC_EP_INT_EN_REG);
+	usb_int_en_reg = udc_read(S3C2410_UDC_USB_INT_EN_REG);
+	udc_write(0, S3C2410_UDC_INDEX_REG);
+	ep0_csr        = udc_read(S3C2410_UDC_IN_CSR1_REG);
+	udc_write(1, S3C2410_UDC_INDEX_REG);
+	ep1_i_csr1     = udc_read(S3C2410_UDC_IN_CSR1_REG);
+	ep1_i_csr2     = udc_read(S3C2410_UDC_IN_CSR2_REG);
+	ep1_o_csr1     = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+	ep1_o_csr2     = udc_read(S3C2410_UDC_OUT_CSR2_REG);
+	udc_write(2, S3C2410_UDC_INDEX_REG);
+	ep2_i_csr1     = udc_read(S3C2410_UDC_IN_CSR1_REG);
+	ep2_i_csr2     = udc_read(S3C2410_UDC_IN_CSR2_REG);
+	ep2_o_csr1     = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+	ep2_o_csr2     = udc_read(S3C2410_UDC_OUT_CSR2_REG);
+
+	seq_printf(m, "FUNC_ADDR_REG  : 0x%04X\n"
+		 "PWR_REG        : 0x%04X\n"
+		 "EP_INT_REG     : 0x%04X\n"
+		 "USB_INT_REG    : 0x%04X\n"
+		 "EP_INT_EN_REG  : 0x%04X\n"
+		 "USB_INT_EN_REG : 0x%04X\n"
+		 "EP0_CSR        : 0x%04X\n"
+		 "EP1_I_CSR1     : 0x%04X\n"
+		 "EP1_I_CSR2     : 0x%04X\n"
+		 "EP1_O_CSR1     : 0x%04X\n"
+		 "EP1_O_CSR2     : 0x%04X\n"
+		 "EP2_I_CSR1     : 0x%04X\n"
+		 "EP2_I_CSR2     : 0x%04X\n"
+		 "EP2_O_CSR1     : 0x%04X\n"
+		 "EP2_O_CSR2     : 0x%04X\n",
+			addr_reg,pwr_reg,ep_int_reg,usb_int_reg,
+			ep_int_en_reg, usb_int_en_reg, ep0_csr,
+			ep1_i_csr1,ep1_i_csr2,ep1_o_csr1,ep1_o_csr2,
+			ep2_i_csr1,ep2_i_csr2,ep2_o_csr1,ep2_o_csr2
+		);
+
+	return 0;
+}
+
+static int s3c2410_udc_debugfs_fops_open(struct inode *inode,
+					 struct file *file)
+{
+	return single_open(file, s3c2410_udc_debugfs_seq_show, NULL);
+}
+
+static const struct file_operations s3c2410_udc_debugfs_fops = {
+	.open		= s3c2410_udc_debugfs_fops_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.owner		= THIS_MODULE,
+};
+
+/* io macros */
+
+static inline void s3c2410_udc_clear_ep0_opr(void __iomem *base)
+{
+	udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+	udc_writeb(base, S3C2410_UDC_EP0_CSR_SOPKTRDY,
+			S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_clear_ep0_sst(void __iomem *base)
+{
+	udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+	writeb(0x00, base + S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_clear_ep0_se(void __iomem *base)
+{
+	udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+	udc_writeb(base, S3C2410_UDC_EP0_CSR_SSE, S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_ipr(void __iomem *base)
+{
+	udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+	udc_writeb(base, S3C2410_UDC_EP0_CSR_IPKRDY, S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_de(void __iomem *base)
+{
+	udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+	udc_writeb(base, S3C2410_UDC_EP0_CSR_DE, S3C2410_UDC_EP0_CSR_REG);
+}
+
+inline void s3c2410_udc_set_ep0_ss(void __iomem *b)
+{
+	udc_writeb(b, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+	udc_writeb(b, S3C2410_UDC_EP0_CSR_SENDSTL, S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_de_out(void __iomem *base)
+{
+	udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+
+	udc_writeb(base,(S3C2410_UDC_EP0_CSR_SOPKTRDY
+				| S3C2410_UDC_EP0_CSR_DE),
+			S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_sse_out(void __iomem *base)
+{
+	udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+	udc_writeb(base, (S3C2410_UDC_EP0_CSR_SOPKTRDY
+				| S3C2410_UDC_EP0_CSR_SSE),
+			S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_de_in(void __iomem *base)
+{
+	udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+	udc_writeb(base, (S3C2410_UDC_EP0_CSR_IPKRDY
+			| S3C2410_UDC_EP0_CSR_DE),
+		S3C2410_UDC_EP0_CSR_REG);
+}
+
+/*------------------------- I/O ----------------------------------*/
+
+/*
+ *	s3c2410_udc_done
+ */
+static void s3c2410_udc_done(struct s3c2410_ep *ep,
+		struct s3c2410_request *req, int status)
+{
+	unsigned halted = ep->halted;
+
+	list_del_init(&req->queue);
+
+	if (likely (req->req.status == -EINPROGRESS))
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	ep->halted = 1;
+	req->req.complete(&ep->ep, &req->req);
+	ep->halted = halted;
+}
+
+static void s3c2410_udc_nuke(struct s3c2410_udc *udc,
+		struct s3c2410_ep *ep, int status)
+{
+	/* Sanity check */
+	if (&ep->queue == NULL)
+		return;
+
+	while (!list_empty (&ep->queue)) {
+		struct s3c2410_request *req;
+		req = list_entry (ep->queue.next, struct s3c2410_request,
+				queue);
+		s3c2410_udc_done(ep, req, status);
+	}
+}
+
+static inline void s3c2410_udc_clear_ep_state(struct s3c2410_udc *dev)
+{
+	unsigned i;
+
+	/* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
+	 * fifos, and pending transactions mustn't be continued in any case.
+	 */
+
+	for (i = 1; i < S3C2410_ENDPOINTS; i++)
+		s3c2410_udc_nuke(dev, &dev->ep[i], -ECONNABORTED);
+}
+
+static inline int s3c2410_udc_fifo_count_out(void)
+{
+	int tmp;
+
+	tmp = udc_read(S3C2410_UDC_OUT_FIFO_CNT2_REG) << 8;
+	tmp |= udc_read(S3C2410_UDC_OUT_FIFO_CNT1_REG);
+	return tmp;
+}
+
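+/*
+ * Editor's note (illustrative, not part of the original driver): the OUT
+ * FIFO byte count is split across two 8-bit registers, so the count is
+ * assembled as (CNT2 << 8) | CNT1; e.g. CNT2 = 0x01 and CNT1 = 0x40 means
+ * 0x140 = 320 bytes are waiting in the FIFO.
+ */
+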
+/*
+ *	s3c2410_udc_write_packet
+ */
+static inline int s3c2410_udc_write_packet(int fifo,
+		struct s3c2410_request *req,
+		unsigned max)
+{
+	unsigned len = min(req->req.length - req->req.actual, max);
+	u8 *buf = req->req.buf + req->req.actual;
+
+	prefetch(buf);
+
+	dprintk(DEBUG_VERBOSE, "%s %d %d %d %d\n", __func__,
+		req->req.actual, req->req.length, len, req->req.actual + len);
+
+	req->req.actual += len;
+
+	udelay(5);
+	writesb(base_addr + fifo, buf, len);
+	return len;
+}
+
+/*
+ *	s3c2410_udc_write_fifo
+ *
+ * return:  0 = still running, 1 = completed, negative = errno
+ */
+static int s3c2410_udc_write_fifo(struct s3c2410_ep *ep,
+		struct s3c2410_request *req)
+{
+	unsigned	count;
+	int		is_last;
+	u32		idx;
+	int		fifo_reg;
+	u32		ep_csr;
+
+	idx = ep->bEndpointAddress & 0x7F;
+	switch (idx) {
+	default:
+		idx = 0;
+	case 0:
+		fifo_reg = S3C2410_UDC_EP0_FIFO_REG;
+		break;
+	case 1:
+		fifo_reg = S3C2410_UDC_EP1_FIFO_REG;
+		break;
+	case 2:
+		fifo_reg = S3C2410_UDC_EP2_FIFO_REG;
+		break;
+	case 3:
+		fifo_reg = S3C2410_UDC_EP3_FIFO_REG;
+		break;
+	case 4:
+		fifo_reg = S3C2410_UDC_EP4_FIFO_REG;
+		break;
+	}
+
+	count = s3c2410_udc_write_packet(fifo_reg, req, ep->ep.maxpacket);
+
+	/* last packet is often short (sometimes a zlp) */
+	if (count != ep->ep.maxpacket)
+		is_last = 1;
+	else if (req->req.length != req->req.actual || req->req.zero)
+		is_last = 0;
+	else
+		is_last = 2;
+
+	/* Only ep0 debug messages are interesting */
+	if (idx == 0)
+		dprintk(DEBUG_NORMAL,
+			"Written ep%d %d.%d of %d b [last %d,z %d]\n",
+			idx, count, req->req.actual, req->req.length,
+			is_last, req->req.zero);
+
+	if (is_last) {
+		/* The order is important. It prevents sending 2 packets
+		 * at the same time */
+
+		if (idx == 0) {
+			/* Reset signal => no need to say 'data sent' */
+			if (! (udc_read(S3C2410_UDC_USB_INT_REG)
+					& S3C2410_UDC_USBINT_RESET))
+				s3c2410_udc_set_ep0_de_in(base_addr);
+			ep->dev->ep0state=EP0_IDLE;
+		} else {
+			udc_write(idx, S3C2410_UDC_INDEX_REG);
+			ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+			udc_write(idx, S3C2410_UDC_INDEX_REG);
+			udc_write(ep_csr | S3C2410_UDC_ICSR1_PKTRDY,
+					S3C2410_UDC_IN_CSR1_REG);
+		}
+
+		s3c2410_udc_done(ep, req, 0);
+		is_last = 1;
+	} else {
+		if (idx == 0) {
+			/* Reset signal => no need to say 'data sent' */
+			if (! (udc_read(S3C2410_UDC_USB_INT_REG)
+					& S3C2410_UDC_USBINT_RESET))
+				s3c2410_udc_set_ep0_ipr(base_addr);
+		} else {
+			udc_write(idx, S3C2410_UDC_INDEX_REG);
+			ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+			udc_write(idx, S3C2410_UDC_INDEX_REG);
+			udc_write(ep_csr | S3C2410_UDC_ICSR1_PKTRDY,
+					S3C2410_UDC_IN_CSR1_REG);
+		}
+	}
+
+	return is_last;
+}
+
+static inline int s3c2410_udc_read_packet(int fifo, u8 *buf,
+		struct s3c2410_request *req, unsigned avail)
+{
+	unsigned len;
+
+	len = min(req->req.length - req->req.actual, avail);
+	req->req.actual += len;
+
+	readsb(fifo + base_addr, buf, len);
+	return len;
+}
+
+/*
+ * return:  0 = still running, 1 = queue empty, negative = errno
+ */
+static int s3c2410_udc_read_fifo(struct s3c2410_ep *ep,
+				 struct s3c2410_request *req)
+{
+	u8		*buf;
+	u32		ep_csr;
+	unsigned	bufferspace;
+	int		is_last=1;
+	unsigned	avail;
+	int		fifo_count = 0;
+	u32		idx;
+	int		fifo_reg;
+
+	idx = ep->bEndpointAddress & 0x7F;
+
+	switch (idx) {
+	default:
+		idx = 0;
+	case 0:
+		fifo_reg = S3C2410_UDC_EP0_FIFO_REG;
+		break;
+	case 1:
+		fifo_reg = S3C2410_UDC_EP1_FIFO_REG;
+		break;
+	case 2:
+		fifo_reg = S3C2410_UDC_EP2_FIFO_REG;
+		break;
+	case 3:
+		fifo_reg = S3C2410_UDC_EP3_FIFO_REG;
+		break;
+	case 4:
+		fifo_reg = S3C2410_UDC_EP4_FIFO_REG;
+		break;
+	}
+
+	if (!req->req.length)
+		return 1;
+
+	buf = req->req.buf + req->req.actual;
+	bufferspace = req->req.length - req->req.actual;
+	if (!bufferspace) {
+		dprintk(DEBUG_NORMAL, "%s: buffer full!\n", __func__);
+		return -1;
+	}
+
+	udc_write(idx, S3C2410_UDC_INDEX_REG);
+
+	fifo_count = s3c2410_udc_fifo_count_out();
+	dprintk(DEBUG_NORMAL, "%s fifo count : %d\n", __func__, fifo_count);
+
+	if (fifo_count > ep->ep.maxpacket)
+		avail = ep->ep.maxpacket;
+	else
+		avail = fifo_count;
+
+	fifo_count = s3c2410_udc_read_packet(fifo_reg, buf, req, avail);
+
+	/* checking this with ep0 is not accurate as we have already
+	 * read a control request
+	 */
+	if (idx != 0 && fifo_count < ep->ep.maxpacket) {
+		is_last = 1;
+		/* overflowed this request?  flush extra data */
+		if (fifo_count != avail)
+			req->req.status = -EOVERFLOW;
+	} else {
+		is_last = (req->req.length <= req->req.actual) ? 1 : 0;
+	}
+
+	udc_write(idx, S3C2410_UDC_INDEX_REG);
+	fifo_count = s3c2410_udc_fifo_count_out();
+
+	/* Only ep0 debug messages are interesting */
+	if (idx == 0)
+		dprintk(DEBUG_VERBOSE, "%s fifo count : %d [last %d]\n",
+			__func__, fifo_count,is_last);
+
+	if (is_last) {
+		if (idx == 0) {
+			s3c2410_udc_set_ep0_de_out(base_addr);
+			ep->dev->ep0state = EP0_IDLE;
+		} else {
+			udc_write(idx, S3C2410_UDC_INDEX_REG);
+			ep_csr = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+			udc_write(idx, S3C2410_UDC_INDEX_REG);
+			udc_write(ep_csr & ~S3C2410_UDC_OCSR1_PKTRDY,
+					S3C2410_UDC_OUT_CSR1_REG);
+		}
+
+		s3c2410_udc_done(ep, req, 0);
+	} else {
+		if (idx == 0) {
+			s3c2410_udc_clear_ep0_opr(base_addr);
+		} else {
+			udc_write(idx, S3C2410_UDC_INDEX_REG);
+			ep_csr = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+			udc_write(idx, S3C2410_UDC_INDEX_REG);
+			udc_write(ep_csr & ~S3C2410_UDC_OCSR1_PKTRDY,
+					S3C2410_UDC_OUT_CSR1_REG);
+		}
+	}
+
+	return is_last;
+}
+
+static int s3c2410_udc_read_fifo_crq(struct usb_ctrlrequest *crq)
+{
+	unsigned char *outbuf = (unsigned char*)crq;
+	int bytes_read = 0;
+
+	udc_write(0, S3C2410_UDC_INDEX_REG);
+
+	bytes_read = s3c2410_udc_fifo_count_out();
+
+	dprintk(DEBUG_NORMAL, "%s: fifo_count=%d\n", __func__, bytes_read);
+
+	if (bytes_read > sizeof(struct usb_ctrlrequest))
+		bytes_read = sizeof(struct usb_ctrlrequest);
+
+	readsb(S3C2410_UDC_EP0_FIFO_REG + base_addr, outbuf, bytes_read);
+
+	dprintk(DEBUG_VERBOSE, "%s: len=%d %02x:%02x {%x,%x,%x}\n", __func__,
+		bytes_read, crq->bRequest, crq->bRequestType,
+		crq->wValue, crq->wIndex, crq->wLength);
+
+	return bytes_read;
+}
+
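+/*
+ * Editor's note: a SETUP transaction is always 8 bytes, i.e. exactly
+ * sizeof(struct usb_ctrlrequest).  s3c2410_udc_handle_ep0_idle() below
+ * relies on this and stalls endpoint 0 if the FIFO returns a different
+ * length.
+ */
+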
+static int s3c2410_udc_get_status(struct s3c2410_udc *dev,
+		struct usb_ctrlrequest *crq)
+{
+	u16 status = 0;
+	u8 ep_num = crq->wIndex & 0x7F;
+	u8 is_in = crq->wIndex & USB_DIR_IN;
+
+	switch (crq->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_INTERFACE:
+		break;
+
+	case USB_RECIP_DEVICE:
+		status = dev->devstatus;
+		break;
+
+	case USB_RECIP_ENDPOINT:
+		if (ep_num > 4 || crq->wLength > 2)
+			return 1;
+
+		if (ep_num == 0) {
+			udc_write(0, S3C2410_UDC_INDEX_REG);
+			status = udc_read(S3C2410_UDC_IN_CSR1_REG);
+			status = status & S3C2410_UDC_EP0_CSR_SENDSTL;
+		} else {
+			udc_write(ep_num, S3C2410_UDC_INDEX_REG);
+			if (is_in) {
+				status = udc_read(S3C2410_UDC_IN_CSR1_REG);
+				status = status & S3C2410_UDC_ICSR1_SENDSTL;
+			} else {
+				status = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+				status = status & S3C2410_UDC_OCSR1_SENDSTL;
+			}
+		}
+
+		status = status ? 1 : 0;
+		break;
+
+	default:
+		return 1;
+	}
+
+	/* Seems to be needed to get it working. ouch :( */
+	udelay(5);
+	udc_write(status & 0xFF, S3C2410_UDC_EP0_FIFO_REG);
+	udc_write(status >> 8, S3C2410_UDC_EP0_FIFO_REG);
+	s3c2410_udc_set_ep0_de_in(base_addr);
+
+	return 0;
+}
+/*------------------------- usb state machine -------------------------------*/
+static int s3c2410_udc_set_halt(struct usb_ep *_ep, int value);
+
+static void s3c2410_udc_handle_ep0_idle(struct s3c2410_udc *dev,
+					struct s3c2410_ep *ep,
+					struct usb_ctrlrequest *crq,
+					u32 ep0csr)
+{
+	int len, ret, tmp;
+
+	/* start control request? */
+	if (!(ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY))
+		return;
+
+	s3c2410_udc_nuke(dev, ep, -EPROTO);
+
+	len = s3c2410_udc_read_fifo_crq(crq);
+	if (len != sizeof(*crq)) {
+		dprintk(DEBUG_NORMAL, "setup begin: fifo READ ERROR"
+			" wanted %d bytes got %d. Stalling out...\n",
+			sizeof(*crq), len);
+		s3c2410_udc_set_ep0_ss(base_addr);
+		return;
+	}
+
+	dprintk(DEBUG_NORMAL, "bRequest = %d bRequestType %d wLength = %d\n",
+		crq->bRequest, crq->bRequestType, crq->wLength);
+
+	/* cope with automagic for some standard requests. */
+	dev->req_std = (crq->bRequestType & USB_TYPE_MASK)
+		== USB_TYPE_STANDARD;
+	dev->req_config = 0;
+	dev->req_pending = 1;
+
+	switch (crq->bRequest) {
+	case USB_REQ_SET_CONFIGURATION:
+		dprintk(DEBUG_NORMAL, "USB_REQ_SET_CONFIGURATION ... \n");
+
+		if (crq->bRequestType == USB_RECIP_DEVICE) {
+			dev->req_config = 1;
+			s3c2410_udc_set_ep0_de_out(base_addr);
+		}
+		break;
+
+	case USB_REQ_SET_INTERFACE:
+		dprintk(DEBUG_NORMAL, "USB_REQ_SET_INTERFACE ... \n");
+
+		if (crq->bRequestType == USB_RECIP_INTERFACE) {
+			dev->req_config = 1;
+			s3c2410_udc_set_ep0_de_out(base_addr);
+		}
+		break;
+
+	case USB_REQ_SET_ADDRESS:
+		dprintk(DEBUG_NORMAL, "USB_REQ_SET_ADDRESS ... \n");
+
+		if (crq->bRequestType == USB_RECIP_DEVICE) {
+			tmp = crq->wValue & 0x7F;
+			dev->address = tmp;
+			udc_write((tmp | S3C2410_UDC_FUNCADDR_UPDATE),
+					S3C2410_UDC_FUNC_ADDR_REG);
+			s3c2410_udc_set_ep0_de_out(base_addr);
+			return;
+		}
+		break;
+
+	case USB_REQ_GET_STATUS:
+		dprintk(DEBUG_NORMAL, "USB_REQ_GET_STATUS ... \n");
+		s3c2410_udc_clear_ep0_opr(base_addr);
+
+		if (dev->req_std) {
+			if (!s3c2410_udc_get_status(dev, crq)) {
+				return;
+			}
+		}
+		break;
+
+	case USB_REQ_CLEAR_FEATURE:
+		s3c2410_udc_clear_ep0_opr(base_addr);
+
+		if (crq->bRequestType != USB_RECIP_ENDPOINT)
+			break;
+
+		if (crq->wValue != USB_ENDPOINT_HALT || crq->wLength != 0)
+			break;
+
+		s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 0);
+		s3c2410_udc_set_ep0_de_out(base_addr);
+		return;
+
+	case USB_REQ_SET_FEATURE:
+		s3c2410_udc_clear_ep0_opr(base_addr);
+
+		if (crq->bRequestType != USB_RECIP_ENDPOINT)
+			break;
+
+		if (crq->wValue != USB_ENDPOINT_HALT || crq->wLength != 0)
+			break;
+
+		s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 1);
+		s3c2410_udc_set_ep0_de_out(base_addr);
+		return;
+
+	default:
+		s3c2410_udc_clear_ep0_opr(base_addr);
+		break;
+	}
+
+	if (crq->bRequestType & USB_DIR_IN)
+		dev->ep0state = EP0_IN_DATA_PHASE;
+	else
+		dev->ep0state = EP0_OUT_DATA_PHASE;
+
+	if (!dev->driver)
+		return;
+
+	/* deliver the request to the gadget driver */
+	ret = dev->driver->setup(&dev->gadget, crq);
+	if (ret < 0) {
+		if (dev->req_config) {
+			dprintk(DEBUG_NORMAL, "config change %02x fail %d?\n",
+				crq->bRequest, ret);
+			return;
+		}
+
+		if (ret == -EOPNOTSUPP)
+			dprintk(DEBUG_NORMAL, "Operation not supported\n");
+		else
+			dprintk(DEBUG_NORMAL,
+				"dev->driver->setup failed. (%d)\n", ret);
+
+		udelay(5);
+		s3c2410_udc_set_ep0_ss(base_addr);
+		s3c2410_udc_set_ep0_de_out(base_addr);
+		dev->ep0state = EP0_IDLE;
+		/* deferred i/o == no response yet */
+	} else if (dev->req_pending) {
+		dprintk(DEBUG_VERBOSE, "dev->req_pending... what now?\n");
+		dev->req_pending=0;
+	}
+
+	dprintk(DEBUG_VERBOSE, "ep0state %s\n", ep0states[dev->ep0state]);
+}
+
+static void s3c2410_udc_handle_ep0(struct s3c2410_udc *dev)
+{
+	u32			ep0csr;
+	struct s3c2410_ep	*ep = &dev->ep[0];
+	struct s3c2410_request	*req;
+	struct usb_ctrlrequest	crq;
+
+	if (list_empty(&ep->queue))
+		req = NULL;
+	else
+		req = list_entry(ep->queue.next, struct s3c2410_request, queue);
+
+	/* We make the assumption that S3C2410_UDC_IN_CSR1_REG is equal to
+	 * S3C2410_UDC_EP0_CSR_REG when index is zero */
+
+	udc_write(0, S3C2410_UDC_INDEX_REG);
+	ep0csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+
+	dprintk(DEBUG_NORMAL, "ep0csr %x ep0state %s\n",
+		ep0csr, ep0states[dev->ep0state]);
+
+	/* clear stall status */
+	if (ep0csr & S3C2410_UDC_EP0_CSR_SENTSTL) {
+		s3c2410_udc_nuke(dev, ep, -EPIPE);
+		dprintk(DEBUG_NORMAL, "... clear SENT_STALL ...\n");
+		s3c2410_udc_clear_ep0_sst(base_addr);
+		dev->ep0state = EP0_IDLE;
+		return;
+	}
+
+	/* clear setup end */
+	if (ep0csr & S3C2410_UDC_EP0_CSR_SE) {
+		dprintk(DEBUG_NORMAL, "... serviced SETUP_END ...\n");
+		s3c2410_udc_nuke(dev, ep, 0);
+		s3c2410_udc_clear_ep0_se(base_addr);
+		dev->ep0state = EP0_IDLE;
+	}
+
+	switch (dev->ep0state) {
+	case EP0_IDLE:
+		s3c2410_udc_handle_ep0_idle(dev, ep, &crq, ep0csr);
+		break;
+
+	case EP0_IN_DATA_PHASE:			/* GET_DESCRIPTOR etc */
+		dprintk(DEBUG_NORMAL, "EP0_IN_DATA_PHASE ... what now?\n");
+		if (!(ep0csr & S3C2410_UDC_EP0_CSR_IPKRDY) && req) {
+			s3c2410_udc_write_fifo(ep, req);
+		}
+		break;
+
+	case EP0_OUT_DATA_PHASE:		/* SET_DESCRIPTOR etc */
+		dprintk(DEBUG_NORMAL, "EP0_OUT_DATA_PHASE ... what now?\n");
+		if ((ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY) && req ) {
+			s3c2410_udc_read_fifo(ep,req);
+		}
+		break;
+
+	case EP0_END_XFER:
+		dprintk(DEBUG_NORMAL, "EP0_END_XFER ... what now?\n");
+		dev->ep0state = EP0_IDLE;
+		break;
+
+	case EP0_STALL:
+		dprintk(DEBUG_NORMAL, "EP0_STALL ... what now?\n");
+		dev->ep0state = EP0_IDLE;
+		break;
+	}
+}
+
+/*
+ *	handle_ep - Manage I/O endpoints
+ */
+
+static void s3c2410_udc_handle_ep(struct s3c2410_ep *ep)
+{
+	struct s3c2410_request	*req;
+	int			is_in = ep->bEndpointAddress & USB_DIR_IN;
+	u32			ep_csr1;
+	u32			idx;
+
+	if (likely (!list_empty(&ep->queue)))
+		req = list_entry(ep->queue.next,
+				struct s3c2410_request, queue);
+	else
+		req = NULL;
+
+	idx = ep->bEndpointAddress & 0x7F;
+
+	if (is_in) {
+		udc_write(idx, S3C2410_UDC_INDEX_REG);
+		ep_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
+		dprintk(DEBUG_VERBOSE, "ep%01d write csr:%02x %d\n",
+			idx, ep_csr1, req ? 1 : 0);
+
+		if (ep_csr1 & S3C2410_UDC_ICSR1_SENTSTL) {
+			dprintk(DEBUG_VERBOSE, "st\n");
+			udc_write(idx, S3C2410_UDC_INDEX_REG);
+			udc_write(ep_csr1 & ~S3C2410_UDC_ICSR1_SENTSTL,
+					S3C2410_UDC_IN_CSR1_REG);
+			return;
+		}
+
+		if (!(ep_csr1 & S3C2410_UDC_ICSR1_PKTRDY) && req) {
+			s3c2410_udc_write_fifo(ep,req);
+		}
+	} else {
+		udc_write(idx, S3C2410_UDC_INDEX_REG);
+		ep_csr1 = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+		dprintk(DEBUG_VERBOSE, "ep%01d rd csr:%02x\n", idx, ep_csr1);
+
+		if (ep_csr1 & S3C2410_UDC_OCSR1_SENTSTL) {
+			udc_write(idx, S3C2410_UDC_INDEX_REG);
+			udc_write(ep_csr1 & ~S3C2410_UDC_OCSR1_SENTSTL,
+					S3C2410_UDC_OUT_CSR1_REG);
+			return;
+		}
+
+		if ((ep_csr1 & S3C2410_UDC_OCSR1_PKTRDY) && req) {
+			s3c2410_udc_read_fifo(ep,req);
+		}
+	}
+}
+
+#include <mach/regs-irq.h>
+
+/*
+ *	s3c2410_udc_irq - interrupt handler
+ */
+static irqreturn_t s3c2410_udc_irq(int dummy, void *_dev)
+{
+	struct s3c2410_udc *dev = _dev;
+	int usb_status;
+	int usbd_status;
+	int pwr_reg;
+	int ep0csr;
+	int i;
+	u32 idx, idx2;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* Driver connected ? */
+	if (!dev->driver) {
+		/* Clear interrupts */
+		udc_write(udc_read(S3C2410_UDC_USB_INT_REG),
+				S3C2410_UDC_USB_INT_REG);
+		udc_write(udc_read(S3C2410_UDC_EP_INT_REG),
+				S3C2410_UDC_EP_INT_REG);
+	}
+
+	/* Save index */
+	idx = udc_read(S3C2410_UDC_INDEX_REG);
+
+	/* Read status registers */
+	usb_status = udc_read(S3C2410_UDC_USB_INT_REG);
+	usbd_status = udc_read(S3C2410_UDC_EP_INT_REG);
+	pwr_reg = udc_read(S3C2410_UDC_PWR_REG);
+
+	udc_writeb(base_addr, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+	ep0csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+
+	dprintk(DEBUG_NORMAL, "usbs=%02x, usbds=%02x, pwr=%02x ep0csr=%02x\n",
+		usb_status, usbd_status, pwr_reg, ep0csr);
+
+	/*
+	 * Now, handle interrupts. There are two types:
+	 * - Reset, Resume, Suspend coming -> usb_int_reg
+	 * - EP -> ep_int_reg
+	 */
+
+	/* RESET */
+	if (usb_status & S3C2410_UDC_USBINT_RESET) {
+		/* two kinds of reset:
+		 * - reset start -> pwr reg = 8
+		 * - reset end   -> pwr reg = 0
+		 */
+		dprintk(DEBUG_NORMAL, "USB reset csr %x pwr %x\n",
+			ep0csr, pwr_reg);
+
+		dev->gadget.speed = USB_SPEED_UNKNOWN;
+		udc_write(0x00, S3C2410_UDC_INDEX_REG);
+		udc_write((dev->ep[0].ep.maxpacket & 0x7ff) >> 3,
+				S3C2410_UDC_MAXP_REG);
+		dev->address = 0;
+
+		dev->ep0state = EP0_IDLE;
+		dev->gadget.speed = USB_SPEED_FULL;
+
+		/* clear interrupt */
+		udc_write(S3C2410_UDC_USBINT_RESET,
+				S3C2410_UDC_USB_INT_REG);
+
+		udc_write(idx, S3C2410_UDC_INDEX_REG);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return IRQ_HANDLED;
+	}
+
+	/* RESUME */
+	if (usb_status & S3C2410_UDC_USBINT_RESUME) {
+		dprintk(DEBUG_NORMAL, "USB resume\n");
+
+		/* clear interrupt */
+		udc_write(S3C2410_UDC_USBINT_RESUME,
+				S3C2410_UDC_USB_INT_REG);
+
+		if (dev->gadget.speed != USB_SPEED_UNKNOWN
+				&& dev->driver
+				&& dev->driver->resume)
+			dev->driver->resume(&dev->gadget);
+	}
+
+	/* SUSPEND */
+	if (usb_status & S3C2410_UDC_USBINT_SUSPEND) {
+		dprintk(DEBUG_NORMAL, "USB suspend\n");
+
+		/* clear interrupt */
+		udc_write(S3C2410_UDC_USBINT_SUSPEND,
+				S3C2410_UDC_USB_INT_REG);
+
+		if (dev->gadget.speed != USB_SPEED_UNKNOWN
+				&& dev->driver
+				&& dev->driver->suspend)
+			dev->driver->suspend(&dev->gadget);
+
+		dev->ep0state = EP0_IDLE;
+	}
+
+	/* EP */
+	/* control traffic */
+	/* checking ep0csr != 0 is not a good idea as clearing in_pkt_ready
+	 * generates an interrupt
+	 */
+	if (usbd_status & S3C2410_UDC_INT_EP0) {
+		dprintk(DEBUG_VERBOSE, "USB ep0 irq\n");
+		/* Clear the interrupt bit by setting it to 1 */
+		udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_REG);
+		s3c2410_udc_handle_ep0(dev);
+	}
+
+	/* endpoint data transfers */
+	for (i = 1; i < S3C2410_ENDPOINTS; i++) {
+		u32 tmp = 1 << i;
+		if (usbd_status & tmp) {
+			dprintk(DEBUG_VERBOSE, "USB ep%d irq\n", i);
+
+			/* Clear the interrupt bit by setting it to 1 */
+			udc_write(tmp, S3C2410_UDC_EP_INT_REG);
+			s3c2410_udc_handle_ep(&dev->ep[i]);
+		}
+	}
+
+	/* what else causes this interrupt? a receive! who is it? */
+	if (!usb_status && !usbd_status && !pwr_reg && !ep0csr) {
+		for (i = 1; i < S3C2410_ENDPOINTS; i++) {
+			idx2 = udc_read(S3C2410_UDC_INDEX_REG);
+			udc_write(i, S3C2410_UDC_INDEX_REG);
+
+			if (udc_read(S3C2410_UDC_OUT_CSR1_REG) & 0x1)
+				s3c2410_udc_handle_ep(&dev->ep[i]);
+
+			/* restore index */
+			udc_write(idx2, S3C2410_UDC_INDEX_REG);
+		}
+	}
+
+	dprintk(DEBUG_VERBOSE, "irq: %d s3c2410_udc_done.\n", IRQ_USBD);
+
+	/* Restore old index */
+	udc_write(idx, S3C2410_UDC_INDEX_REG);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return IRQ_HANDLED;
+}
+/*------------------------- s3c2410_ep_ops ----------------------------------*/
+
+static inline struct s3c2410_ep *to_s3c2410_ep(struct usb_ep *ep)
+{
+	return container_of(ep, struct s3c2410_ep, ep);
+}
+
+static inline struct s3c2410_udc *to_s3c2410_udc(struct usb_gadget *gadget)
+{
+	return container_of(gadget, struct s3c2410_udc, gadget);
+}
+
+static inline struct s3c2410_request *to_s3c2410_req(struct usb_request *req)
+{
+	return container_of(req, struct s3c2410_request, req);
+}
+
+/*
+ *	s3c2410_udc_ep_enable
+ */
+static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
+				 const struct usb_endpoint_descriptor *desc)
+{
+	struct s3c2410_udc	*dev;
+	struct s3c2410_ep	*ep;
+	u32			max, tmp;
+	unsigned long		flags;
+	u32			csr1,csr2;
+	u32			int_en_reg;
+
+	ep = to_s3c2410_ep(_ep);
+
+	if (!_ep || !desc || ep->desc
+			|| _ep->name == ep0name
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+
+	dev = ep->dev;
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	max = usb_endpoint_maxp(desc) & 0x1fff;
+
+	local_irq_save (flags);
+	_ep->maxpacket = max & 0x7ff;
+	ep->desc = desc;
+	ep->halted = 0;
+	ep->bEndpointAddress = desc->bEndpointAddress;
+
+	/* set max packet */
+	udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+	udc_write(max >> 3, S3C2410_UDC_MAXP_REG);
+
+	/* set type, direction, address; reset fifo counters */
+	if (desc->bEndpointAddress & USB_DIR_IN) {
+		csr1 = S3C2410_UDC_ICSR1_FFLUSH|S3C2410_UDC_ICSR1_CLRDT;
+		csr2 = S3C2410_UDC_ICSR2_MODEIN|S3C2410_UDC_ICSR2_DMAIEN;
+
+		udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+		udc_write(csr1, S3C2410_UDC_IN_CSR1_REG);
+		udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+		udc_write(csr2, S3C2410_UDC_IN_CSR2_REG);
+	} else {
+		/* don't flush in fifo or it will cause endpoint interrupt */
+		csr1 = S3C2410_UDC_ICSR1_CLRDT;
+		csr2 = S3C2410_UDC_ICSR2_DMAIEN;
+
+		udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+		udc_write(csr1, S3C2410_UDC_IN_CSR1_REG);
+		udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+		udc_write(csr2, S3C2410_UDC_IN_CSR2_REG);
+
+		csr1 = S3C2410_UDC_OCSR1_FFLUSH | S3C2410_UDC_OCSR1_CLRDT;
+		csr2 = S3C2410_UDC_OCSR2_DMAIEN;
+
+		udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+		udc_write(csr1, S3C2410_UDC_OUT_CSR1_REG);
+		udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+		udc_write(csr2, S3C2410_UDC_OUT_CSR2_REG);
+	}
+
+	/* enable irqs */
+	int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
+	udc_write(int_en_reg | (1 << ep->num), S3C2410_UDC_EP_INT_EN_REG);
+
+	/* print some debug message */
+	tmp = desc->bEndpointAddress;
+	dprintk (DEBUG_NORMAL, "enable %s(%d) ep%x%s-blk max %02x\n",
+		 _ep->name,ep->num, tmp,
+		 desc->bEndpointAddress & USB_DIR_IN ? "in" : "out", max);
+
+	local_irq_restore (flags);
+	s3c2410_udc_set_halt(_ep, 0);
+
+	return 0;
+}
+
+/*
+ * s3c2410_udc_ep_disable
+ */
+static int s3c2410_udc_ep_disable(struct usb_ep *_ep)
+{
+	struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
+	unsigned long flags;
+	u32 int_en_reg;
+
+	if (!_ep || !ep->desc) {
+		dprintk(DEBUG_NORMAL, "%s not enabled\n",
+			_ep ? ep->ep.name : NULL);
+		return -EINVAL;
+	}
+
+	local_irq_save(flags);
+
+	dprintk(DEBUG_NORMAL, "ep_disable: %s\n", _ep->name);
+
+	ep->desc = NULL;
+	ep->ep.desc = NULL;
+	ep->halted = 1;
+
+	s3c2410_udc_nuke (ep->dev, ep, -ESHUTDOWN);
+
+	/* disable irqs */
+	int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
+	udc_write(int_en_reg & ~(1<<ep->num), S3C2410_UDC_EP_INT_EN_REG);
+
+	local_irq_restore(flags);
+
+	dprintk(DEBUG_NORMAL, "%s disabled\n", _ep->name);
+
+	return 0;
+}
+
+/*
+ * s3c2410_udc_alloc_request
+ */
+static struct usb_request *
+s3c2410_udc_alloc_request(struct usb_ep *_ep, gfp_t mem_flags)
+{
+	struct s3c2410_request *req;
+
+	dprintk(DEBUG_VERBOSE,"%s(%p,%d)\n", __func__, _ep, mem_flags);
+
+	if (!_ep)
+		return NULL;
+
+	req = kzalloc (sizeof(struct s3c2410_request), mem_flags);
+	if (!req)
+		return NULL;
+
+	INIT_LIST_HEAD (&req->queue);
+	return &req->req;
+}
+
+/*
+ * s3c2410_udc_free_request
+ */
+static void
+s3c2410_udc_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct s3c2410_ep	*ep = to_s3c2410_ep(_ep);
+	struct s3c2410_request	*req = to_s3c2410_req(_req);
+
+	dprintk(DEBUG_VERBOSE, "%s(%p,%p)\n", __func__, _ep, _req);
+
+	if (!ep || !_req || (!ep->desc && _ep->name != ep0name))
+		return;
+
+	WARN_ON (!list_empty (&req->queue));
+	kfree(req);
+}
+
+/*
+ *	s3c2410_udc_queue
+ */
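+/*
+ * If the endpoint's request queue is empty and the endpoint is not
+ * halted, the new request is started immediately below via the FIFO
+ * write/read helpers; otherwise it is appended to ep->queue and the
+ * interrupt handler advances it later.
+ */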
+static int s3c2410_udc_queue(struct usb_ep *_ep, struct usb_request *_req,
+		gfp_t gfp_flags)
+{
+	struct s3c2410_request	*req = to_s3c2410_req(_req);
+	struct s3c2410_ep	*ep = to_s3c2410_ep(_ep);
+	struct s3c2410_udc	*dev;
+	u32			ep_csr = 0;
+	int			fifo_count = 0;
+	unsigned long		flags;
+
+	if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
+		dprintk(DEBUG_NORMAL, "%s: invalid args\n", __func__);
+		return -EINVAL;
+	}
+
+	dev = ep->dev;
+	if (unlikely (!dev->driver
+			|| dev->gadget.speed == USB_SPEED_UNKNOWN)) {
+		return -ESHUTDOWN;
+	}
+
+	local_irq_save (flags);
+
+	if (unlikely(!_req || !_req->complete
+			|| !_req->buf || !list_empty(&req->queue))) {
+		if (!_req)
+			dprintk(DEBUG_NORMAL, "%s: 1 X X X\n", __func__);
+		else {
+			dprintk(DEBUG_NORMAL, "%s: 0 %01d %01d %01d\n",
+				__func__, !_req->complete, !_req->buf,
+				!list_empty(&req->queue));
+		}
+
+		local_irq_restore(flags);
+		return -EINVAL;
+	}
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	dprintk(DEBUG_VERBOSE, "%s: ep%x len %d\n",
+		 __func__, ep->bEndpointAddress, _req->length);
+
+	if (ep->bEndpointAddress) {
+		udc_write(ep->bEndpointAddress & 0x7F, S3C2410_UDC_INDEX_REG);
+
+		ep_csr = udc_read((ep->bEndpointAddress & USB_DIR_IN)
+				? S3C2410_UDC_IN_CSR1_REG
+				: S3C2410_UDC_OUT_CSR1_REG);
+		fifo_count = s3c2410_udc_fifo_count_out();
+	} else {
+		udc_write(0, S3C2410_UDC_INDEX_REG);
+		ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+		fifo_count = s3c2410_udc_fifo_count_out();
+	}
+
+	/* kickstart this i/o queue? */
+	if (list_empty(&ep->queue) && !ep->halted) {
+		if (ep->bEndpointAddress == 0 /* ep0 */) {
+			switch (dev->ep0state) {
+			case EP0_IN_DATA_PHASE:
+				if (!(ep_csr&S3C2410_UDC_EP0_CSR_IPKRDY)
+						&& s3c2410_udc_write_fifo(ep,
+							req)) {
+					dev->ep0state = EP0_IDLE;
+					req = NULL;
+				}
+				break;
+
+			case EP0_OUT_DATA_PHASE:
+				if ((!_req->length)
+					|| ((ep_csr & S3C2410_UDC_OCSR1_PKTRDY)
+						&& s3c2410_udc_read_fifo(ep,
+							req))) {
+					dev->ep0state = EP0_IDLE;
+					req = NULL;
+				}
+				break;
+
+			default:
+				local_irq_restore(flags);
+				return -EL2HLT;
+			}
+		} else if ((ep->bEndpointAddress & USB_DIR_IN) != 0
+				&& (!(ep_csr&S3C2410_UDC_OCSR1_PKTRDY))
+				&& s3c2410_udc_write_fifo(ep, req)) {
+			req = NULL;
+		} else if ((ep_csr & S3C2410_UDC_OCSR1_PKTRDY)
+				&& fifo_count
+				&& s3c2410_udc_read_fifo(ep, req)) {
+			req = NULL;
+		}
+	}
+
+	/* pio or dma irq handler advances the queue. */
+	if (likely (req != 0))
+		list_add_tail(&req->queue, &ep->queue);
+
+	local_irq_restore(flags);
+
+	dprintk(DEBUG_VERBOSE, "%s ok\n", __func__);
+	return 0;
+}
+
+/*
+ *	s3c2410_udc_dequeue
+ */
+static int s3c2410_udc_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct s3c2410_ep	*ep = to_s3c2410_ep(_ep);
+	struct s3c2410_udc	*udc;
+	int			retval = -EINVAL;
+	unsigned long		flags;
+	struct s3c2410_request	*req = NULL;
+
+	dprintk(DEBUG_VERBOSE, "%s(%p,%p)\n", __func__, _ep, _req);
+
+	if (!the_controller->driver)
+		return -ESHUTDOWN;
+
+	if (!_ep || !_req)
+		return retval;
+
+	udc = to_s3c2410_udc(ep->gadget);
+
+	local_irq_save (flags);
+
+	list_for_each_entry (req, &ep->queue, queue) {
+		if (&req->req == _req) {
+			list_del_init (&req->queue);
+			_req->status = -ECONNRESET;
+			retval = 0;
+			break;
+		}
+	}
+
+	if (retval == 0) {
+		dprintk(DEBUG_VERBOSE,
+			"dequeued req %p from %s, len %d buf %p\n",
+			req, _ep->name, _req->length, _req->buf);
+
+		s3c2410_udc_done(ep, req, -ECONNRESET);
+	}
+
+	local_irq_restore (flags);
+	return retval;
+}
+
+/*
+ * s3c2410_udc_set_halt
+ */
+static int s3c2410_udc_set_halt(struct usb_ep *_ep, int value)
+{
+	struct s3c2410_ep	*ep = to_s3c2410_ep(_ep);
+	u32			ep_csr = 0;
+	unsigned long		flags;
+	u32			idx;
+
+	if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
+		dprintk(DEBUG_NORMAL, "%s: inval 2\n", __func__);
+		return -EINVAL;
+	}
+
+	local_irq_save (flags);
+
+	idx = ep->bEndpointAddress & 0x7F;
+
+	if (idx == 0) {
+		s3c2410_udc_set_ep0_ss(base_addr);
+		s3c2410_udc_set_ep0_de_out(base_addr);
+	} else {
+		udc_write(idx, S3C2410_UDC_INDEX_REG);
+		ep_csr = udc_read((ep->bEndpointAddress & USB_DIR_IN)
+				? S3C2410_UDC_IN_CSR1_REG
+				: S3C2410_UDC_OUT_CSR1_REG);
+
+		if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
+			if (value)
+				udc_write(ep_csr | S3C2410_UDC_ICSR1_SENDSTL,
+					S3C2410_UDC_IN_CSR1_REG);
+			else {
+				ep_csr &= ~S3C2410_UDC_ICSR1_SENDSTL;
+				udc_write(ep_csr, S3C2410_UDC_IN_CSR1_REG);
+				ep_csr |= S3C2410_UDC_ICSR1_CLRDT;
+				udc_write(ep_csr, S3C2410_UDC_IN_CSR1_REG);
+			}
+		} else {
+			if (value)
+				udc_write(ep_csr | S3C2410_UDC_OCSR1_SENDSTL,
+					S3C2410_UDC_OUT_CSR1_REG);
+			else {
+				ep_csr &= ~S3C2410_UDC_OCSR1_SENDSTL;
+				udc_write(ep_csr, S3C2410_UDC_OUT_CSR1_REG);
+				ep_csr |= S3C2410_UDC_OCSR1_CLRDT;
+				udc_write(ep_csr, S3C2410_UDC_OUT_CSR1_REG);
+			}
+		}
+	}
+
+	ep->halted = value ? 1 : 0;
+	local_irq_restore (flags);
+
+	return 0;
+}
+
+static const struct usb_ep_ops s3c2410_ep_ops = {
+	.enable		= s3c2410_udc_ep_enable,
+	.disable	= s3c2410_udc_ep_disable,
+
+	.alloc_request	= s3c2410_udc_alloc_request,
+	.free_request	= s3c2410_udc_free_request,
+
+	.queue		= s3c2410_udc_queue,
+	.dequeue	= s3c2410_udc_dequeue,
+
+	.set_halt	= s3c2410_udc_set_halt,
+};
+
+/*------------------------- usb_gadget_ops ----------------------------------*/
+
+/*
+ *	s3c2410_udc_get_frame
+ */
+static int s3c2410_udc_get_frame(struct usb_gadget *_gadget)
+{
+	int tmp;
+
+	dprintk(DEBUG_VERBOSE, "%s()\n", __func__);
+
+	tmp = udc_read(S3C2410_UDC_FRAME_NUM2_REG) << 8;
+	tmp |= udc_read(S3C2410_UDC_FRAME_NUM1_REG);
+	return tmp;
+}
+
+/*
+ *	s3c2410_udc_wakeup
+ */
+static int s3c2410_udc_wakeup(struct usb_gadget *_gadget)
+{
+	dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+	return 0;
+}
+
+/*
+ *	s3c2410_udc_set_selfpowered
+ */
+static int s3c2410_udc_set_selfpowered(struct usb_gadget *gadget, int value)
+{
+	struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
+
+	dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+	if (value)
+		udc->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
+	else
+		udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+
+	return 0;
+}
+
+static void s3c2410_udc_disable(struct s3c2410_udc *dev);
+static void s3c2410_udc_enable(struct s3c2410_udc *dev);
+
+static int s3c2410_udc_set_pullup(struct s3c2410_udc *udc, int is_on)
+{
+	dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+	if (udc_info && (udc_info->udc_command ||
+		gpio_is_valid(udc_info->pullup_pin))) {
+
+		if (is_on)
+			s3c2410_udc_enable(udc);
+		else {
+			if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+				if (udc->driver && udc->driver->disconnect)
+					udc->driver->disconnect(&udc->gadget);
+
+			}
+			s3c2410_udc_disable(udc);
+		}
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int s3c2410_udc_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
+
+	dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+	udc->vbus = (is_active != 0);
+	s3c2410_udc_set_pullup(udc, is_active);
+	return 0;
+}
+
+static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
+
+	dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+	s3c2410_udc_set_pullup(udc, is_on ? 1 : 0);
+	return 0;
+}
+
+static irqreturn_t s3c2410_udc_vbus_irq(int irq, void *_dev)
+{
+	struct s3c2410_udc	*dev = _dev;
+	unsigned int		value;
+
+	dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+	value = gpio_get_value(udc_info->vbus_pin) ? 1 : 0;
+	if (udc_info->vbus_pin_inverted)
+		value = !value;
+
+	if (value != dev->vbus)
+		s3c2410_udc_vbus_session(&dev->gadget, value);
+
+	return IRQ_HANDLED;
+}
+
+static int s3c2410_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
+{
+	dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+	if (udc_info && udc_info->vbus_draw) {
+		udc_info->vbus_draw(ma);
+		return 0;
+	}
+
+	return -ENOTSUPP;
+}
+
+static int s3c2410_udc_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *));
+static int s3c2410_udc_stop(struct usb_gadget_driver *driver);
+
+static const struct usb_gadget_ops s3c2410_ops = {
+	.get_frame		= s3c2410_udc_get_frame,
+	.wakeup			= s3c2410_udc_wakeup,
+	.set_selfpowered	= s3c2410_udc_set_selfpowered,
+	.pullup			= s3c2410_udc_pullup,
+	.vbus_session		= s3c2410_udc_vbus_session,
+	.vbus_draw		= s3c2410_vbus_draw,
+	.start			= s3c2410_udc_start,
+	.stop			= s3c2410_udc_stop,
+};
+
+static void s3c2410_udc_command(enum s3c2410_udc_cmd_e cmd)
+{
+	if (!udc_info)
+		return;
+
+	if (udc_info->udc_command) {
+		udc_info->udc_command(cmd);
+	} else if (gpio_is_valid(udc_info->pullup_pin)) {
+		int value;
+
+		switch (cmd) {
+		case S3C2410_UDC_P_ENABLE:
+			value = 1;
+			break;
+		case S3C2410_UDC_P_DISABLE:
+			value = 0;
+			break;
+		default:
+			return;
+		}
+		value ^= udc_info->pullup_pin_inverted;
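+		/*
+		 * Example: with pullup_pin_inverted set, P_ENABLE (value 1)
+		 * ends up driving the pull-up GPIO low, and P_DISABLE high.
+		 */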
+
+		gpio_set_value(udc_info->pullup_pin, value);
+	}
+}
+
+/*------------------------- gadget driver handling---------------------------*/
+/*
+ * s3c2410_udc_disable
+ */
+static void s3c2410_udc_disable(struct s3c2410_udc *dev)
+{
+	dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+	/* Disable all interrupts */
+	udc_write(0x00, S3C2410_UDC_USB_INT_EN_REG);
+	udc_write(0x00, S3C2410_UDC_EP_INT_EN_REG);
+
+	/* Clear the interrupt registers */
+	udc_write(S3C2410_UDC_USBINT_RESET
+				| S3C2410_UDC_USBINT_RESUME
+				| S3C2410_UDC_USBINT_SUSPEND,
+			S3C2410_UDC_USB_INT_REG);
+
+	udc_write(0x1F, S3C2410_UDC_EP_INT_REG);
+
+	/* Good bye, cruel world */
+	s3c2410_udc_command(S3C2410_UDC_P_DISABLE);
+
+	/* Set speed to unknown */
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+}
+
+/*
+ * s3c2410_udc_reinit
+ */
+static void s3c2410_udc_reinit(struct s3c2410_udc *dev)
+{
+	u32 i;
+
+	/* device/ep0 records init */
+	INIT_LIST_HEAD (&dev->gadget.ep_list);
+	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
+	dev->ep0state = EP0_IDLE;
+
+	for (i = 0; i < S3C2410_ENDPOINTS; i++) {
+		struct s3c2410_ep *ep = &dev->ep[i];
+
+		if (i != 0)
+			list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
+
+		ep->dev = dev;
+		ep->desc = NULL;
+		ep->ep.desc = NULL;
+		ep->halted = 0;
+		INIT_LIST_HEAD (&ep->queue);
+	}
+}
+
+/*
+ * s3c2410_udc_enable
+ */
+static void s3c2410_udc_enable(struct s3c2410_udc *dev)
+{
+	int i;
+
+	dprintk(DEBUG_NORMAL, "s3c2410_udc_enable called\n");
+
+	/* dev->gadget.speed = USB_SPEED_UNKNOWN; */
+	dev->gadget.speed = USB_SPEED_FULL;
+
+	/* Set MAXP for all endpoints */
+	for (i = 0; i < S3C2410_ENDPOINTS; i++) {
+		udc_write(i, S3C2410_UDC_INDEX_REG);
+		udc_write((dev->ep[i].ep.maxpacket & 0x7ff) >> 3,
+				S3C2410_UDC_MAXP_REG);
+	}
+
+	/* Set default power state */
+	udc_write(DEFAULT_POWER_STATE, S3C2410_UDC_PWR_REG);
+
+	/* Enable reset and suspend interrupts */
+	udc_write(S3C2410_UDC_USBINT_RESET | S3C2410_UDC_USBINT_SUSPEND,
+			S3C2410_UDC_USB_INT_EN_REG);
+
+	/* Enable ep0 interrupt */
+	udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_EN_REG);
+
+	/* time to say "hello, world" */
+	s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
+}
+
+static int s3c2410_udc_start(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct s3c2410_udc *udc = the_controller;
+	int		retval;
+
+	dprintk(DEBUG_NORMAL, "%s() '%s'\n", __func__, driver->driver.name);
+
+	/* Sanity checks */
+	if (!udc)
+		return -ENODEV;
+
+	if (udc->driver)
+		return -EBUSY;
+
+	if (!bind || !driver->setup || driver->max_speed < USB_SPEED_FULL) {
+		printk(KERN_ERR "Invalid driver: bind %p setup %p speed %d\n",
+			bind, driver->setup, driver->max_speed);
+		return -EINVAL;
+	}
+#if defined(MODULE)
+	if (!driver->unbind) {
+		printk(KERN_ERR "Invalid driver: no unbind method\n");
+		return -EINVAL;
+	}
+#endif
+
+	/* Hook the driver */
+	udc->driver = driver;
+	udc->gadget.dev.driver = &driver->driver;
+
+	/* Bind the driver */
+	if ((retval = device_add(&udc->gadget.dev)) != 0) {
+		printk(KERN_ERR "Error in device_add() : %d\n",retval);
+		goto register_error;
+	}
+
+	dprintk(DEBUG_NORMAL, "binding gadget driver '%s'\n",
+		driver->driver.name);
+
+	if ((retval = bind(&udc->gadget)) != 0) {
+		device_del(&udc->gadget.dev);
+		goto register_error;
+	}
+
+	/* Enable udc */
+	s3c2410_udc_enable(udc);
+
+	return 0;
+
+register_error:
+	udc->driver = NULL;
+	udc->gadget.dev.driver = NULL;
+	return retval;
+}
+
+static int s3c2410_udc_stop(struct usb_gadget_driver *driver)
+{
+	struct s3c2410_udc *udc = the_controller;
+
+	if (!udc)
+		return -ENODEV;
+
+	if (!driver || driver != udc->driver || !driver->unbind)
+		return -EINVAL;
+
+	dprintk(DEBUG_NORMAL, "usb_gadget_unregister_driver() '%s'\n",
+		driver->driver.name);
+
+	/* report disconnect */
+	if (driver->disconnect)
+		driver->disconnect(&udc->gadget);
+
+	driver->unbind(&udc->gadget);
+
+	device_del(&udc->gadget.dev);
+	udc->driver = NULL;
+
+	/* Disable udc */
+	s3c2410_udc_disable(udc);
+
+	return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+static struct s3c2410_udc memory = {
+	.gadget = {
+		.ops		= &s3c2410_ops,
+		.ep0		= &memory.ep[0].ep,
+		.name		= gadget_name,
+		.dev = {
+			.init_name	= "gadget",
+		},
+	},
+
+	/* control endpoint */
+	.ep[0] = {
+		.num		= 0,
+		.ep = {
+			.name		= ep0name,
+			.ops		= &s3c2410_ep_ops,
+			.maxpacket	= EP0_FIFO_SIZE,
+		},
+		.dev		= &memory,
+	},
+
+	/* first group of endpoints */
+	.ep[1] = {
+		.num		= 1,
+		.ep = {
+			.name		= "ep1-bulk",
+			.ops		= &s3c2410_ep_ops,
+			.maxpacket	= EP_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= EP_FIFO_SIZE,
+		.bEndpointAddress = 1,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+	},
+	.ep[2] = {
+		.num		= 2,
+		.ep = {
+			.name		= "ep2-bulk",
+			.ops		= &s3c2410_ep_ops,
+			.maxpacket	= EP_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= EP_FIFO_SIZE,
+		.bEndpointAddress = 2,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+	},
+	.ep[3] = {
+		.num		= 3,
+		.ep = {
+			.name		= "ep3-bulk",
+			.ops		= &s3c2410_ep_ops,
+			.maxpacket	= EP_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= EP_FIFO_SIZE,
+		.bEndpointAddress = 3,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+	},
+	.ep[4] = {
+		.num		= 4,
+		.ep = {
+			.name		= "ep4-bulk",
+			.ops		= &s3c2410_ep_ops,
+			.maxpacket	= EP_FIFO_SIZE,
+		},
+		.dev		= &memory,
+		.fifo_size	= EP_FIFO_SIZE,
+		.bEndpointAddress = 4,
+		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
+	}
+
+};
+
+/*
+ *	probe - binds to the platform device
+ */
+static int s3c2410_udc_probe(struct platform_device *pdev)
+{
+	struct s3c2410_udc *udc = &memory;
+	struct device *dev = &pdev->dev;
+	int retval;
+	int irq;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	usb_bus_clock = clk_get(NULL, "usb-bus-gadget");
+	if (IS_ERR(usb_bus_clock)) {
+		dev_err(dev, "failed to get usb bus clock source\n");
+		return PTR_ERR(usb_bus_clock);
+	}
+
+	clk_enable(usb_bus_clock);
+
+	udc_clock = clk_get(NULL, "usb-device");
+	if (IS_ERR(udc_clock)) {
+		dev_err(dev, "failed to get udc clock source\n");
+		return PTR_ERR(udc_clock);
+	}
+
+	clk_enable(udc_clock);
+
+	mdelay(10);
+
+	dev_dbg(dev, "got and enabled clocks\n");
+
+	if (strncmp(pdev->name, "s3c2440", 7) == 0) {
+		dev_info(dev, "S3C2440: increasing FIFO to 128 bytes\n");
+		memory.ep[1].fifo_size = S3C2440_EP_FIFO_SIZE;
+		memory.ep[2].fifo_size = S3C2440_EP_FIFO_SIZE;
+		memory.ep[3].fifo_size = S3C2440_EP_FIFO_SIZE;
+		memory.ep[4].fifo_size = S3C2440_EP_FIFO_SIZE;
+	}
+
+	spin_lock_init (&udc->lock);
+	udc_info = pdev->dev.platform_data;
+
+	rsrc_start = S3C2410_PA_USBDEV;
+	rsrc_len   = S3C24XX_SZ_USBDEV;
+
+	if (!request_mem_region(rsrc_start, rsrc_len, gadget_name))
+		return -EBUSY;
+
+	base_addr = ioremap(rsrc_start, rsrc_len);
+	if (!base_addr) {
+		retval = -ENOMEM;
+		goto err_mem;
+	}
+
+	device_initialize(&udc->gadget.dev);
+	udc->gadget.dev.parent = &pdev->dev;
+	udc->gadget.dev.dma_mask = pdev->dev.dma_mask;
+
+	the_controller = udc;
+	platform_set_drvdata(pdev, udc);
+
+	s3c2410_udc_disable(udc);
+	s3c2410_udc_reinit(udc);
+
+	/* irq setup after old hardware state is cleaned up */
+	retval = request_irq(IRQ_USBD, s3c2410_udc_irq,
+			     0, gadget_name, udc);
+
+	if (retval != 0) {
+		dev_err(dev, "cannot get irq %i, err %d\n", IRQ_USBD, retval);
+		retval = -EBUSY;
+		goto err_map;
+	}
+
+	dev_dbg(dev, "got irq %i\n", IRQ_USBD);
+
+	if (udc_info && udc_info->vbus_pin > 0) {
+		retval = gpio_request(udc_info->vbus_pin, "udc vbus");
+		if (retval < 0) {
+			dev_err(dev, "cannot claim vbus pin\n");
+			goto err_int;
+		}
+
+		irq = gpio_to_irq(udc_info->vbus_pin);
+		if (irq < 0) {
+			dev_err(dev, "no irq for gpio vbus pin\n");
+			goto err_gpio_claim;
+		}
+
+		retval = request_irq(irq, s3c2410_udc_vbus_irq,
+				     IRQF_TRIGGER_RISING
+				     | IRQF_TRIGGER_FALLING | IRQF_SHARED,
+				     gadget_name, udc);
+
+		if (retval != 0) {
+			dev_err(dev, "can't get vbus irq %d, err %d\n",
+				irq, retval);
+			retval = -EBUSY;
+			goto err_gpio_claim;
+		}
+
+		dev_dbg(dev, "got irq %i\n", irq);
+	} else {
+		udc->vbus = 1;
+	}
+
+	if (udc_info && !udc_info->udc_command &&
+		gpio_is_valid(udc_info->pullup_pin)) {
+
+		retval = gpio_request_one(udc_info->pullup_pin,
+				udc_info->vbus_pin_inverted ?
+				GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+				"udc pullup");
+		if (retval)
+			goto err_vbus_irq;
+	}
+
+	retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+	if (retval)
+		goto err_add_udc;
+
+	if (s3c2410_udc_debugfs_root) {
+		udc->regs_info = debugfs_create_file("registers", S_IRUGO,
+				s3c2410_udc_debugfs_root,
+				udc, &s3c2410_udc_debugfs_fops);
+		if (!udc->regs_info)
+			dev_warn(dev, "debugfs file creation failed\n");
+	}
+
+	dev_dbg(dev, "probe ok\n");
+
+	return 0;
+
+err_add_udc:
+	if (udc_info && !udc_info->udc_command &&
+			gpio_is_valid(udc_info->pullup_pin))
+		gpio_free(udc_info->pullup_pin);
+err_vbus_irq:
+	if (udc_info && udc_info->vbus_pin > 0)
+		free_irq(gpio_to_irq(udc_info->vbus_pin), udc);
+err_gpio_claim:
+	if (udc_info && udc_info->vbus_pin > 0)
+		gpio_free(udc_info->vbus_pin);
+err_int:
+	free_irq(IRQ_USBD, udc);
+err_map:
+	iounmap(base_addr);
+err_mem:
+	release_mem_region(rsrc_start, rsrc_len);
+
+	return retval;
+}
+
+/*
+ *	s3c2410_udc_remove
+ */
+static int s3c2410_udc_remove(struct platform_device *pdev)
+{
+	struct s3c2410_udc *udc = platform_get_drvdata(pdev);
+	unsigned int irq;
+
+	dev_dbg(&pdev->dev, "%s()\n", __func__);
+
+	usb_del_gadget_udc(&udc->gadget);
+	if (udc->driver)
+		return -EBUSY;
+
+	debugfs_remove(udc->regs_info);
+
+	if (udc_info && !udc_info->udc_command &&
+		gpio_is_valid(udc_info->pullup_pin))
+		gpio_free(udc_info->pullup_pin);
+
+	if (udc_info && udc_info->vbus_pin > 0) {
+		irq = gpio_to_irq(udc_info->vbus_pin);
+		free_irq(irq, udc);
+	}
+
+	free_irq(IRQ_USBD, udc);
+
+	iounmap(base_addr);
+	release_mem_region(rsrc_start, rsrc_len);
+
+	platform_set_drvdata(pdev, NULL);
+
+	if (!IS_ERR(udc_clock) && udc_clock != NULL) {
+		clk_disable(udc_clock);
+		clk_put(udc_clock);
+		udc_clock = NULL;
+	}
+
+	if (!IS_ERR(usb_bus_clock) && usb_bus_clock != NULL) {
+		clk_disable(usb_bus_clock);
+		clk_put(usb_bus_clock);
+		usb_bus_clock = NULL;
+	}
+
+	dev_dbg(&pdev->dev, "%s: remove ok\n", __func__);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int s3c2410_udc_suspend(struct platform_device *pdev, pm_message_t message)
+{
+	s3c2410_udc_command(S3C2410_UDC_P_DISABLE);
+
+	return 0;
+}
+
+static int s3c2410_udc_resume(struct platform_device *pdev)
+{
+	s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
+
+	return 0;
+}
+#else
+#define s3c2410_udc_suspend	NULL
+#define s3c2410_udc_resume	NULL
+#endif
+
+static const struct platform_device_id s3c_udc_ids[] = {
+	{ "s3c2410-usbgadget", },
+	{ "s3c2440-usbgadget", },
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, s3c_udc_ids);
+
+static struct platform_driver udc_driver_24x0 = {
+	.driver		= {
+		.name	= "s3c24x0-usbgadget",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= s3c2410_udc_probe,
+	.remove		= s3c2410_udc_remove,
+	.suspend	= s3c2410_udc_suspend,
+	.resume		= s3c2410_udc_resume,
+	.id_table	= s3c_udc_ids,
+};
+
+static int __init udc_init(void)
+{
+	int retval;
+
+	dprintk(DEBUG_NORMAL, "%s: version %s\n", gadget_name, DRIVER_VERSION);
+
+	s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name, NULL);
+	if (IS_ERR(s3c2410_udc_debugfs_root)) {
+		printk(KERN_ERR "%s: debugfs dir creation failed %ld\n",
+			gadget_name, PTR_ERR(s3c2410_udc_debugfs_root));
+		s3c2410_udc_debugfs_root = NULL;
+	}
+
+	retval = platform_driver_register(&udc_driver_24x0);
+	if (retval)
+		goto err;
+
+	return 0;
+
+err:
+	debugfs_remove(s3c2410_udc_debugfs_root);
+	return retval;
+}
+
+static void __exit udc_exit(void)
+{
+	platform_driver_unregister(&udc_driver_24x0);
+	debugfs_remove(s3c2410_udc_debugfs_root);
+}
+
+module_init(udc_init);
+module_exit(udc_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/s3c2410_udc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/s3c2410_udc.h
new file mode 100644
index 0000000..1653bae
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/s3c2410_udc.h
@@ -0,0 +1,100 @@
+/*
+ * linux/drivers/usb/gadget/s3c2410_udc.h
+ * Samsung on-chip full speed USB device controllers
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
+ *	Additional cleanups by Ben Dooks <ben-linux@fluff.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _S3C2410_UDC_H
+#define _S3C2410_UDC_H
+
+struct s3c2410_ep {
+	struct list_head		queue;
+	unsigned long			last_io;	/* jiffies timestamp */
+	struct usb_gadget		*gadget;
+	struct s3c2410_udc		*dev;
+	const struct usb_endpoint_descriptor *desc;
+	struct usb_ep			ep;
+	u8				num;
+
+	unsigned short			fifo_size;
+	u8				bEndpointAddress;
+	u8				bmAttributes;
+
+	unsigned			halted : 1;
+	unsigned			already_seen : 1;
+	unsigned			setup_stage : 1;
+};
+
+
+/*
+ * Warning: ep0 has a FIFO of only 16 bytes, so don't try to set 32 or 64.
+ * Also, testusb test 14 fails with 16 but is fine with 8.
+ */
+#define EP0_FIFO_SIZE		 8
+#define EP_FIFO_SIZE		64
+#define DEFAULT_POWER_STATE	0x00
+
+#define S3C2440_EP_FIFO_SIZE	128
+
+static const char ep0name [] = "ep0";
+
+static const char *const ep_name[] = {
+	ep0name,                                /* everyone has ep0 */
+	/* s3c2410 four bidirectional bulk endpoints */
+	"ep1-bulk", "ep2-bulk", "ep3-bulk", "ep4-bulk",
+};
+
+#define S3C2410_ENDPOINTS       ARRAY_SIZE(ep_name)
+
+struct s3c2410_request {
+	struct list_head		queue;		/* ep's requests */
+	struct usb_request		req;
+};
+
+enum ep0_state {
+        EP0_IDLE,
+        EP0_IN_DATA_PHASE,
+        EP0_OUT_DATA_PHASE,
+        EP0_END_XFER,
+        EP0_STALL,
+};
+
+static const char *ep0states[] = {
+        "EP0_IDLE",
+        "EP0_IN_DATA_PHASE",
+        "EP0_OUT_DATA_PHASE",
+        "EP0_END_XFER",
+        "EP0_STALL",
+};
+
+struct s3c2410_udc {
+	spinlock_t			lock;
+
+	struct s3c2410_ep		ep[S3C2410_ENDPOINTS];
+	int				address;
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+	struct s3c2410_request		fifo_req;
+	u8				fifo_buf[EP_FIFO_SIZE];
+	u16				devstatus;
+
+	u32				port_status;
+	int				ep0state;
+
+	unsigned			got_irq : 1;
+
+	unsigned			req_std : 1;
+	unsigned			req_config : 1;
+	unsigned			req_pending : 1;
+	u8				vbus;
+	struct dentry			*regs_info;
+};
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/serial.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/serial.c
new file mode 100644
index 0000000..665c074
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/serial.c
@@ -0,0 +1,283 @@
+/*
+ * serial.c -- USB gadget serial driver
+ *
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 by David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#include "u_serial.h"
+#include "gadget_chips.h"
+
+
+/* Defines */
+
+#define GS_VERSION_STR			"v2.4"
+#define GS_VERSION_NUM			0x2400
+
+#define GS_LONG_NAME			"Gadget Serial"
+#define GS_VERSION_NAME			GS_LONG_NAME " " GS_VERSION_STR
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "f_acm.c"
+#include "f_obex.c"
+#include "f_serial.c"
+#include "u_serial.c"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Thanks to NetChip Technologies for donating this product ID.
+ *
+ * DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+#define GS_VENDOR_ID			0x0525	/* NetChip */
+#define GS_PRODUCT_ID			0xa4a6	/* Linux-USB Serial Gadget */
+#define GS_CDC_PRODUCT_ID		0xa4a7	/* ... as CDC-ACM */
+#define GS_CDC_OBEX_PRODUCT_ID		0xa4a9	/* ... as CDC-OBEX */
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+#define STRING_DESCRIPTION_IDX		2
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer,
+	[STRING_PRODUCT_IDX].s = GS_VERSION_NAME,
+	[STRING_DESCRIPTION_IDX].s = NULL /* updated; f(use_acm) */,
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		USB_DT_DEVICE_SIZE,
+	.bDescriptorType =	USB_DT_DEVICE,
+	.bcdUSB =		cpu_to_le16(0x0200),
+	/* .bDeviceClass = f(use_acm) */
+	.bDeviceSubClass =	0,
+	.bDeviceProtocol =	0,
+	/* .bMaxPacketSize0 = f(hardware) */
+	.idVendor =		cpu_to_le16(GS_VENDOR_ID),
+	/* .idProduct =	f(use_acm) */
+	/* .bcdDevice = f(hardware) */
+	/* .iManufacturer = DYNAMIC */
+	/* .iProduct = DYNAMIC */
+	.bNumConfigurations =	1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/* REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* Module */
+MODULE_DESCRIPTION(GS_VERSION_NAME);
+MODULE_AUTHOR("Al Borchers");
+MODULE_AUTHOR("David Brownell");
+MODULE_LICENSE("GPL");
+
+static bool use_acm = true;
+module_param(use_acm, bool, 0);
+MODULE_PARM_DESC(use_acm, "Use CDC ACM, default=yes");
+
+static bool use_obex = false;
+module_param(use_obex, bool, 0);
+MODULE_PARM_DESC(use_obex, "Use CDC OBEX, default=no");
+
+static unsigned n_ports = 1;
+module_param(n_ports, uint, 0);
+MODULE_PARM_DESC(n_ports, "number of ports to create, default=1");
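+
+/*
+ * Illustrative usage when built as a module, using the parameters
+ * declared above: for example
+ *
+ *	modprobe g_serial use_acm=0 use_obex=1 n_ports=2
+ *
+ * exposes two CDC OBEX ports instead of the default single CDC ACM port.
+ */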
+
+/*-------------------------------------------------------------------------*/
+
+static int __init serial_bind_config(struct usb_configuration *c)
+{
+	unsigned i;
+	int status = 0;
+
+	for (i = 0; i < n_ports && status == 0; i++) {
+		if (use_acm)
+			status = acm_bind_config(c, i);
+		else if (use_obex)
+			status = obex_bind_config(c, i);
+		else
+			status = gser_bind_config(c, i);
+	}
+	return status;
+}
+
+static struct usb_configuration serial_config_driver = {
+	/* .label = f(use_acm) */
+	/* .bConfigurationValue = f(use_acm) */
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes	= USB_CONFIG_ATT_SELFPOWER,
+};
+
+static int __init gs_bind(struct usb_composite_dev *cdev)
+{
+	int			gcnum;
+	struct usb_gadget	*gadget = cdev->gadget;
+	int			status;
+
+	status = gserial_setup(cdev->gadget, n_ports);
+	if (status < 0)
+		return status;
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+
+	/* device description: manufacturer, product */
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+		init_utsname()->sysname, init_utsname()->release,
+		gadget->name);
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+
+	device_desc.iProduct = status;
+
+	/* config description */
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail;
+	strings_dev[STRING_DESCRIPTION_IDX].id = status;
+
+	serial_config_driver.iConfiguration = status;
+
+	/* set up other descriptors */
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(GS_VERSION_NUM | gcnum);
+	else {
+		/* this is so simple (for now, no altsettings) that it
+		 * SHOULD NOT have problems with bulk-capable hardware.
+		 * so warn about unrecognized controllers -- don't panic.
+		 *
+		 * things like configuration and altsetting numbering
+		 * can need hardware-specific attention though.
+		 */
+		pr_warning("gs_bind: controller '%s' not recognized\n",
+			gadget->name);
+		device_desc.bcdDevice =
+			cpu_to_le16(GS_VERSION_NUM | 0x0099);
+	}
+
+	if (gadget_is_otg(cdev->gadget)) {
+		serial_config_driver.descriptors = otg_desc;
+		serial_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	/* register our configuration */
+	status = usb_add_config(cdev, &serial_config_driver,
+			serial_bind_config);
+	if (status < 0)
+		goto fail;
+
+	INFO(cdev, "%s\n", GS_VERSION_NAME);
+
+	return 0;
+
+fail:
+	gserial_cleanup();
+	return status;
+}
+
+static struct usb_composite_driver gserial_driver = {
+	.name		= "g_serial",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.max_speed	= USB_SPEED_SUPER,
+};
+
+static int __init init(void)
+{
+	/* We *could* export two configs; that'd be much cleaner...
+	 * but neither of these product IDs was defined that way.
+	 */
+	if (use_acm) {
+		serial_config_driver.label = "CDC ACM config";
+		serial_config_driver.bConfigurationValue = 2;
+		device_desc.bDeviceClass = USB_CLASS_COMM;
+		device_desc.idProduct =
+				cpu_to_le16(GS_CDC_PRODUCT_ID);
+	} else if (use_obex) {
+		serial_config_driver.label = "CDC OBEX config";
+		serial_config_driver.bConfigurationValue = 3;
+		device_desc.bDeviceClass = USB_CLASS_COMM;
+		device_desc.idProduct =
+			cpu_to_le16(GS_CDC_OBEX_PRODUCT_ID);
+	} else {
+		serial_config_driver.label = "Generic Serial config";
+		serial_config_driver.bConfigurationValue = 1;
+		device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC;
+		device_desc.idProduct =
+				cpu_to_le16(GS_PRODUCT_ID);
+	}
+	strings_dev[STRING_DESCRIPTION_IDX].s = serial_config_driver.label;
+
+	return usb_composite_probe(&gserial_driver, gs_bind);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&gserial_driver);
+	gserial_cleanup();
+}
+module_exit(cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/storage_common.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/storage_common.c
new file mode 100644
index 0000000..8b2eaa6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/storage_common.c
@@ -0,0 +1,972 @@
+/*
+ * storage_common.c -- Common definitions for mass storage functionality
+ *
+ * Copyright (C) 2003-2008 Alan Stern
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz (mina86@mina86.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+
+/*
+ * This file requires the following identifiers used in USB strings to
+ * be defined (each of type pointer to char):
+ *  - fsg_string_manufacturer -- name of the manufacturer
+ *  - fsg_string_product      -- name of the product
+ *  - fsg_string_config       -- name of the configuration
+ *  - fsg_string_interface    -- name of the interface
+ * The first three are only needed when the FSG_DESCRIPTORS_DEVICE_STRINGS
+ * macro is defined prior to including this file.
+ */
+
+/*
+ * When FSG_NO_INTR_EP is defined fsg_fs_intr_in_desc and
+ * fsg_hs_intr_in_desc objects as well as
+ * FSG_FS_FUNCTION_PRE_EP_ENTRIES and FSG_HS_FUNCTION_PRE_EP_ENTRIES
+ * macros are not defined.
+ *
+ * When FSG_NO_DEVICE_STRINGS is defined FSG_STRING_MANUFACTURER,
+ * FSG_STRING_PRODUCT, FSG_STRING_SERIAL and FSG_STRING_CONFIG are not
+ * defined (as well as corresponding entries in string tables are
+ * missing) and FSG_STRING_INTERFACE has value of zero.
+ *
+ * When FSG_NO_OTG is defined fsg_otg_desc won't be defined.
+ */
+
+/*
+ * When FSG_BUFFHD_STATIC_BUFFER is defined when this file is included
+ * the fsg_buffhd structure's buf field will be an array of FSG_BUFLEN
+ * characters rather than a pointer to void.
+ */
+
+/*
+ * When USB_GADGET_DEBUG_FILES is defined the module param num_buffers
+ * sets the number of pipeline buffers (length of the fsg_buffhd array).
+ * The valid range of num_buffers is: num >= 2 && num <= 4.
+ */
+
+
+#include <linux/usb/storage.h>
+#include <scsi/scsi.h>
+#include <asm/unaligned.h>
+
+
+/*
+ * Thanks to NetChip Technologies for donating this product ID.
+ *
+ * DO NOT REUSE THESE IDs with any other driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+#define FSG_VENDOR_ID	0x19D2	/* ZTE (was 0x0525, NetChip) */
+
+#define FSG_PRODUCT_ID	0xa4a5	/* Linux-USB File-backed Storage Gadget */
+
+
+/*-------------------------------------------------------------------------*/
+
+
+#ifndef DEBUG
+#undef VERBOSE_DEBUG
+#undef DUMP_MSGS
+#endif /* !DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VLDBG	LDBG
+#else
+#define VLDBG(lun, fmt, args...) do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define LDBG(lun, fmt, args...)   dev_dbg (&(lun)->dev, fmt, ## args)
+#define LERROR(lun, fmt, args...) dev_err (&(lun)->dev, fmt, ## args)
+#define LWARN(lun, fmt, args...)  dev_warn(&(lun)->dev, fmt, ## args)
+#define LINFO(lun, fmt, args...)  dev_info(&(lun)->dev, fmt, ## args)
+
+/*
+ * Keep those macros in sync with those in
+ * include/linux/usb/composite.h or else GCC will complain.  If they
+ * are identical (the same argument names, white space in the same
+ * places) GCC will allow the redefinition; otherwise (even if only
+ * white space is removed or added) a warning will be issued.
+ *
+ * Those macros are needed here because File Storage Gadget does not
+ * include the composite.h header.  For composite gadgets those macros
+ * are redundant since composite.h is included any way.
+ *
+ * One could check whether those macros are already defined (which
+ * would indicate composite.h had been included) or not (which would
+ * indicate we were in FSG) but this is not done because a warning is
+ * desired if definitions here differ from the ones in composite.h.
+ *
+ * We want the definitions to match and be the same in File Storage
+ * Gadget as well as Mass Storage Function (and so composite gadgets
+ * using MSF).  If someone changes them in composite.h it will produce
+ * a warning in this file when building MSF.
+ */
+#define DBG(d, fmt, args...)     dev_dbg(&(d)->gadget->dev , fmt , ## args)
+#define VDBG(d, fmt, args...)    dev_vdbg(&(d)->gadget->dev , fmt , ## args)
+#define ERROR(d, fmt, args...)   dev_err(&(d)->gadget->dev , fmt , ## args)
+#define WARNING(d, fmt, args...) dev_warn(&(d)->gadget->dev , fmt , ## args)
+#define INFO(d, fmt, args...)    dev_info(&(d)->gadget->dev , fmt , ## args)
+
+
+
+#ifdef DUMP_MSGS
+
+#  define dump_msg(fsg, /* const char * */ label,			\
+		   /* const u8 * */ buf, /* unsigned */ length) do {	\
+	if (length < 512) {						\
+		DBG(fsg, "%s, length %u:\n", label, length);		\
+		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,	\
+			       16, 1, buf, length, 0);			\
+	}								\
+} while (0)
+
+#  define dump_cdb(fsg) do { } while (0)
+
+#else
+
+#  define dump_msg(fsg, /* const char * */ label, \
+		   /* const u8 * */ buf, /* unsigned */ length) do { } while (0)
+
+#  ifdef VERBOSE_DEBUG
+
+#    define dump_cdb(fsg)						\
+	print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE,	\
+		       16, 1, (fsg)->cmnd, (fsg)->cmnd_size, 0)		\
+
+#  else
+
+#    define dump_cdb(fsg) do { } while (0)
+
+#  endif /* VERBOSE_DEBUG */
+
+#endif /* DUMP_MSGS */
+
+/*-------------------------------------------------------------------------*/
+
+/* CBI Interrupt data structure */
+struct interrupt_data {
+	u8	bType;
+	u8	bValue;
+};
+
+#define CBI_INTERRUPT_DATA_LEN		2
+
+/* CBI Accept Device-Specific Command request */
+#define USB_CBI_ADSC_REQUEST		0x00
+
+
+/* Length of a SCSI Command Data Block */
+#define MAX_COMMAND_SIZE	16
+
+/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
+#define SS_NO_SENSE				0
+#define SS_COMMUNICATION_FAILURE		0x040800
+#define SS_INVALID_COMMAND			0x052000
+#define SS_INVALID_FIELD_IN_CDB			0x052400
+#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE	0x052100
+#define SS_LOGICAL_UNIT_NOT_SUPPORTED		0x052500
+#define SS_MEDIUM_NOT_PRESENT			0x023a00
+#define SS_MEDIUM_REMOVAL_PREVENTED		0x055302
+#define SS_NOT_READY_TO_READY_TRANSITION	0x062800
+#define SS_RESET_OCCURRED			0x062900
+#define SS_SAVING_PARAMETERS_NOT_SUPPORTED	0x053900
+#define SS_UNRECOVERED_READ_ERROR		0x031100
+#define SS_WRITE_ERROR				0x030c02
+#define SS_WRITE_PROTECTED			0x072700
+
+#define SK(x)		((u8) ((x) >> 16))	/* Sense Key byte, etc. */
+#define ASC(x)		((u8) ((x) >> 8))
+#define ASCQ(x)		((u8) (x))
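+
+/*
+ * Worked example: SS_INVALID_FIELD_IN_CDB is 0x052400, so
+ * SK() = 0x05 (Illegal Request), ASC() = 0x24, ASCQ() = 0x00.
+ */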
+
+
+/*-------------------------------------------------------------------------*/
+
+
+struct fsg_lun {
+	struct file	*filp;
+	loff_t		file_length;
+	loff_t		num_sectors;
+
+	unsigned int	initially_ro:1;
+	unsigned int	ro:1;
+	unsigned int	removable:1;
+	unsigned int	cdrom:1;
+	unsigned int	prevent_medium_removal:1;
+	unsigned int	registered:1;
+	unsigned int	info_valid:1;
+	unsigned int	nofua:1;
+
+	u32		sense_data;
+	u32		sense_data_info;
+	u32		unit_attention_data;
+
+	unsigned int	blkbits;	/* Bits of logical block size of bound block device */
+	unsigned int	blksize;	/* logical block size of bound block device */
+	struct device	dev;
+
+	/*
+	 * Vendor (8 chars), product (16 chars), release (4
+	 * hexadecimal digits) and NUL byte
+	 */
+	char inquiry_string[8 + 16 + 4 + 1];
+	
+};
+
+#define fsg_lun_is_open(curlun)	((curlun)->filp != NULL)
+
+static struct fsg_lun *fsg_lun_from_dev(struct device *dev)
+{
+	return container_of(dev, struct fsg_lun, dev);
+}
+
+
+/* Big enough to hold our biggest descriptor */
+#define EP0_BUFSIZE	256
+#define DELAYED_STATUS	(EP0_BUFSIZE + 999)	/* An impossibly large value */
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
+module_param_named(num_buffers, fsg_num_buffers, uint, S_IRUGO);
+MODULE_PARM_DESC(num_buffers, "Number of pipeline buffers");
+
+#else
+
+/*
+ * Number of buffers we will use.
+ * 2 is usually enough for good buffering pipeline
+ */
+#define fsg_num_buffers	CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
+
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+
+/* check if fsg_num_buffers is within a valid range */
+static inline int fsg_num_buffers_validate(void)
+{
+	if (fsg_num_buffers >= 2 && fsg_num_buffers <= 4)
+		return 0;
+	pr_err("fsg_num_buffers %u is out of range (%d to %d)\n",
+	       fsg_num_buffers, 2, 4);
+	return -EINVAL;
+}
+
+/* Default buffer length (65536 was also tried here; 16384 is used). */
+#define FSG_BUFLEN	((u32)16384)
+/* Maximal number of LUNs supported in mass storage function */
+#define FSG_MAX_LUNS	8
+
+enum fsg_buffer_state {
+	BUF_STATE_EMPTY = 0,
+	BUF_STATE_FULL,
+	BUF_STATE_BUSY
+};
+
+struct fsg_buffhd {
+#ifdef FSG_BUFFHD_STATIC_BUFFER
+	char				buf[FSG_BUFLEN];
+#else
+	void				*buf;
+#endif
+	enum fsg_buffer_state		state;
+	struct fsg_buffhd		*next;
+
+	/*
+	 * The NetChip 2280 is faster, and handles some protocol faults
+	 * better, if we don't submit any short bulk-out read requests.
+	 * So we will record the intended request length here.
+	 */
+	unsigned int			bulk_out_intended_length;
+
+	struct usb_request		*inreq;
+	int				inreq_busy;
+	struct usb_request		*outreq;
+	int				outreq_busy;
+};
+
+enum fsg_state {
+	/* This one isn't used anywhere */
+	FSG_STATE_COMMAND_PHASE = -10,
+	FSG_STATE_DATA_PHASE,
+	FSG_STATE_STATUS_PHASE,
+
+	FSG_STATE_IDLE = 0,
+	FSG_STATE_ABORT_BULK_OUT,
+	FSG_STATE_RESET,
+	FSG_STATE_INTERFACE_CHANGE,
+	FSG_STATE_CONFIG_CHANGE,
+	FSG_STATE_DISCONNECT,
+	FSG_STATE_EXIT,
+	FSG_STATE_TERMINATED
+};
+
+enum data_direction {
+	DATA_DIR_UNKNOWN = 0,
+	DATA_DIR_FROM_HOST,
+	DATA_DIR_TO_HOST,
+	DATA_DIR_NONE
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+
+static inline u32 get_unaligned_be24(u8 *buf)
+{
+	return 0xffffff & (u32) get_unaligned_be32(buf - 1);
+}
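+
+/*
+ * Example: for a 3-byte field {0x12, 0x34, 0x56}, get_unaligned_be32()
+ * loads the four bytes starting one byte before the field and the
+ * 0xffffff mask leaves 0x123456.  The byte just before buf must
+ * therefore be readable, which holds for the SCSI CDB fields this
+ * helper is typically used on (they do not start at offset 0 of the
+ * command block).
+ */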
+
+
+/*-------------------------------------------------------------------------*/
+
+
+enum {
+#ifndef FSG_NO_DEVICE_STRINGS
+	FSG_STRING_MANUFACTURER	= 1,
+	FSG_STRING_PRODUCT,
+	FSG_STRING_SERIAL,
+	FSG_STRING_CONFIG,
+#endif
+	FSG_STRING_INTERFACE
+};
+
+
+#ifndef FSG_NO_OTG
+static struct usb_otg_descriptor
+fsg_otg_desc = {
+	.bLength =		sizeof fsg_otg_desc,
+	.bDescriptorType =	USB_DT_OTG,
+
+	.bmAttributes =		USB_OTG_SRP,
+};
+#endif
+
+/* There is only one interface. */
+
+static struct usb_interface_descriptor
+fsg_intf_desc = {
+	.bLength =		sizeof fsg_intf_desc,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bNumEndpoints =	2,		/* Adjusted during fsg_bind() */
+	.bInterfaceClass =	USB_CLASS_MASS_STORAGE,
+	.bInterfaceSubClass =	USB_SC_SCSI,	/* Adjusted during fsg_bind() */
+	.bInterfaceProtocol =	USB_PR_BULK,	/* Adjusted during fsg_bind() */
+	.iInterface =		FSG_STRING_INTERFACE,
+};
+
+/*
+ * Three full-speed endpoint descriptors: bulk-in, bulk-out, and
+ * interrupt-in.
+ */
+
+static struct usb_endpoint_descriptor
+fsg_fs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	/* wMaxPacketSize set by autoconfiguration */
+};
+
+static struct usb_endpoint_descriptor
+fsg_fs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	/* wMaxPacketSize set by autoconfiguration */
+};
+
+#ifndef FSG_NO_INTR_EP
+
+static struct usb_endpoint_descriptor
+fsg_fs_intr_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(2),
+	.bInterval =		32,	/* frames -> 32 ms */
+};
+
+#ifndef FSG_NO_OTG
+#  define FSG_FS_FUNCTION_PRE_EP_ENTRIES	2
+#else
+#  define FSG_FS_FUNCTION_PRE_EP_ENTRIES	1
+#endif
+
+#endif
+
+static struct usb_descriptor_header *fsg_fs_function[] = {
+#ifndef FSG_NO_OTG
+	(struct usb_descriptor_header *) &fsg_otg_desc,
+#endif
+	(struct usb_descriptor_header *) &fsg_intf_desc,
+	(struct usb_descriptor_header *) &fsg_fs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fsg_fs_bulk_out_desc,
+#ifndef FSG_NO_INTR_EP
+	(struct usb_descriptor_header *) &fsg_fs_intr_in_desc,
+#endif
+	NULL,
+};
+
+
+/*
+ * USB 2.0 devices need to expose both high speed and full speed
+ * descriptors, unless they only run at full speed.
+ *
+ * That means alternate endpoint descriptors (bigger packets)
+ * and a "device qualifier" ... plus more construction options
+ * for the configuration descriptor.
+ */
+static struct usb_endpoint_descriptor
+fsg_hs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor
+fsg_hs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+	.bInterval =		1,	/* NAK every 1 uframe */
+};
+
+#ifndef FSG_NO_INTR_EP
+
+static struct usb_endpoint_descriptor
+fsg_hs_intr_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(2),
+	.bInterval =		9,	/* 2**(9-1) = 256 uframes -> 32 ms */
+};
+
+#ifndef FSG_NO_OTG
+#  define FSG_HS_FUNCTION_PRE_EP_ENTRIES	2
+#else
+#  define FSG_HS_FUNCTION_PRE_EP_ENTRIES	1
+#endif
+
+#endif
+
+static struct usb_descriptor_header *fsg_hs_function[] = {
+#ifndef FSG_NO_OTG
+	(struct usb_descriptor_header *) &fsg_otg_desc,
+#endif
+	(struct usb_descriptor_header *) &fsg_intf_desc,
+	(struct usb_descriptor_header *) &fsg_hs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fsg_hs_bulk_out_desc,
+#ifndef FSG_NO_INTR_EP
+	(struct usb_descriptor_header *) &fsg_hs_intr_in_desc,
+#endif
+	NULL,
+};
+
+static struct usb_endpoint_descriptor
+fsg_ss_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor fsg_ss_bulk_in_comp_desc = {
+	.bLength =		sizeof(fsg_ss_bulk_in_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/*.bMaxBurst =		DYNAMIC, */
+};
+
+static struct usb_endpoint_descriptor
+fsg_ss_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor fsg_ss_bulk_out_comp_desc = {
+	.bLength =		sizeof(fsg_ss_bulk_in_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/*.bMaxBurst =		DYNAMIC, */
+};
+
+#ifndef FSG_NO_INTR_EP
+
+static struct usb_endpoint_descriptor
+fsg_ss_intr_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(2),
+	.bInterval =		9,	/* 2**(9-1) = 256 uframes -> 32 ms */
+};
+
+static struct usb_ss_ep_comp_descriptor fsg_ss_intr_in_comp_desc = {
+	.bLength =		sizeof(fsg_ss_bulk_in_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	.wBytesPerInterval =	cpu_to_le16(2),
+};
+
+#ifndef FSG_NO_OTG
+#  define FSG_SS_FUNCTION_PRE_EP_ENTRIES	2
+#else
+#  define FSG_SS_FUNCTION_PRE_EP_ENTRIES	1
+#endif
+
+#endif
+
+static __maybe_unused struct usb_ext_cap_descriptor fsg_ext_cap_desc = {
+	.bLength =		USB_DT_USB_EXT_CAP_SIZE,
+	.bDescriptorType =	USB_DT_DEVICE_CAPABILITY,
+	.bDevCapabilityType =	USB_CAP_TYPE_EXT,
+
+	.bmAttributes =		cpu_to_le32(USB_LPM_SUPPORT),
+};
+
+static __maybe_unused struct usb_ss_cap_descriptor fsg_ss_cap_desc = {
+	.bLength =		USB_DT_USB_SS_CAP_SIZE,
+	.bDescriptorType =	USB_DT_DEVICE_CAPABILITY,
+	.bDevCapabilityType =	USB_SS_CAP_TYPE,
+
+	/* .bmAttributes = LTM is not supported yet */
+
+	.wSpeedSupported =	cpu_to_le16(USB_LOW_SPEED_OPERATION
+		| USB_FULL_SPEED_OPERATION
+		| USB_HIGH_SPEED_OPERATION
+		| USB_5GBPS_OPERATION),
+	.bFunctionalitySupport = USB_LOW_SPEED_OPERATION,
+	.bU1devExitLat =	USB_DEFAULT_U1_DEV_EXIT_LAT,
+	.bU2DevExitLat =	cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT),
+};
+
+static __maybe_unused struct usb_bos_descriptor fsg_bos_desc = {
+	.bLength =		USB_DT_BOS_SIZE,
+	.bDescriptorType =	USB_DT_BOS,
+
+	.wTotalLength =		cpu_to_le16(USB_DT_BOS_SIZE
+				+ USB_DT_USB_EXT_CAP_SIZE
+				+ USB_DT_USB_SS_CAP_SIZE),
+
+	.bNumDeviceCaps =	2,
+};
+
+static struct usb_descriptor_header *fsg_ss_function[] = {
+#ifndef FSG_NO_OTG
+	(struct usb_descriptor_header *) &fsg_otg_desc,
+#endif
+	(struct usb_descriptor_header *) &fsg_intf_desc,
+	(struct usb_descriptor_header *) &fsg_ss_bulk_in_desc,
+	(struct usb_descriptor_header *) &fsg_ss_bulk_in_comp_desc,
+	(struct usb_descriptor_header *) &fsg_ss_bulk_out_desc,
+	(struct usb_descriptor_header *) &fsg_ss_bulk_out_comp_desc,
+#ifndef FSG_NO_INTR_EP
+	(struct usb_descriptor_header *) &fsg_ss_intr_in_desc,
+	(struct usb_descriptor_header *) &fsg_ss_intr_in_comp_desc,
+#endif
+	NULL,
+};
+
+/* Maxpacket and other transfer characteristics vary by speed. */
+static __maybe_unused struct usb_endpoint_descriptor *
+fsg_ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
+		struct usb_endpoint_descriptor *hs,
+		struct usb_endpoint_descriptor *ss)
+{
+	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+		return ss;
+	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return hs;
+	return fs;
+}
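+
+/*
+ * Illustrative use: pick the descriptor that matches the current
+ * connection speed for one endpoint, e.g.
+ *
+ *	d = fsg_ep_desc(gadget, &fsg_fs_bulk_in_desc,
+ *			&fsg_hs_bulk_in_desc, &fsg_ss_bulk_in_desc);
+ *
+ * and enable the endpoint with the returned descriptor.
+ */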
+
+
+/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
+static struct usb_string		fsg_strings[] = {
+#ifndef FSG_NO_DEVICE_STRINGS
+	{FSG_STRING_MANUFACTURER,	fsg_string_manufacturer},
+	{FSG_STRING_PRODUCT,		fsg_string_product},
+	{FSG_STRING_SERIAL,		""},
+	{FSG_STRING_CONFIG,		fsg_string_config},
+#endif
+	{FSG_STRING_INTERFACE,		fsg_string_interface},
+	{}
+};
+
+static struct usb_gadget_strings	fsg_stringtab = {
+	.language	= 0x0409,		/* en-us */
+	.strings	= fsg_strings,
+};
+
+
+ /*-------------------------------------------------------------------------*/
+
+/*
+ * If the next two routines are called while the gadget is registered,
+ * the caller must own fsg->filesem for writing.
+ */
+
+static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
+{
+	int				ro;
+	struct file			*filp = NULL;
+	int				rc = -EINVAL;
+	struct inode			*inode = NULL;
+	loff_t				size;
+	loff_t				num_sectors;
+	loff_t				min_sectors;
+
+	/* R/W if we can, R/O if we must */
+	ro = curlun->initially_ro;
+	if (!ro) {
+		filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
+		if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES)
+			ro = 1;
+	}
+	if (ro)
+		filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
+	if (IS_ERR(filp)) {
+		LINFO(curlun, "unable to open backing file: %s\n", filename);
+		return PTR_ERR(filp);
+	}
+
+	if (!(filp->f_mode & FMODE_WRITE))
+		ro = 1;
+
+	if (filp->f_path.dentry)
+		inode = filp->f_path.dentry->d_inode;
+	if (!inode || (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
+		LINFO(curlun, "invalid file type: %s\n", filename);
+		goto out;
+	}
+
+	/*
+	 * If we can't read the file, it's no good.
+	 * If we can't write the file, use it read-only.
+	 */
+	if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
+		LINFO(curlun, "file not readable: %s\n", filename);
+		goto out;
+	}
+	if (!(filp->f_op->write || filp->f_op->aio_write))
+		ro = 1;
+
+	size = i_size_read(inode->i_mapping->host);
+	if (size < 0) {
+		LINFO(curlun, "unable to find file size: %s\n", filename);
+		rc = (int) size;
+		goto out;
+	}
+
+	if (curlun->cdrom) {
+		curlun->blksize = 2048;
+		curlun->blkbits = 11;
+	} else if (inode->i_bdev) {
+		curlun->blksize = bdev_logical_block_size(inode->i_bdev);
+		curlun->blkbits = blksize_bits(curlun->blksize);
+	} else {
+		curlun->blksize = 512;
+		curlun->blkbits = 9;
+	}
+
+	num_sectors = size >> curlun->blkbits; /* File size in logical-block-size blocks */
+	min_sectors = 1;
+	if (curlun->cdrom) {
+		min_sectors = 300;	/* Smallest track is 300 frames */
+		if (num_sectors >= 256*60*75) {
+			num_sectors = 256*60*75 - 1;
+			LINFO(curlun, "file too big: %s\n", filename);
+			LINFO(curlun, "using only first %d blocks\n",
+					(int) num_sectors);
+		}
+	}
+	if (num_sectors < min_sectors) {
+		LINFO(curlun, "file too small: %s\n", filename);
+		rc = -ETOOSMALL;
+		goto out;
+	}
+
+	get_file(filp);
+	curlun->ro = ro;
+	curlun->filp = filp;
+	curlun->file_length = size;
+	curlun->num_sectors = num_sectors;
+	if (!curlun->cdrom)
+		curlun->nofua = 1;
+	LDBG(curlun, "open backing file: %s\n", filename);
+	rc = 0;
+
+out:
+	filp_close(filp, current->files);
+	return rc;
+}
+
+
+static void fsg_lun_close(struct fsg_lun *curlun)
+{
+	if (curlun->filp) {
+		LDBG(curlun, "close backing file\n");
+		/* Force a sync here; the file may have been opened more than once. */
+		vfs_fsync(curlun->filp, 1);
+		fput(curlun->filp);
+		curlun->filp = NULL;
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Sync the file data, don't bother with the metadata.
+ * This code was copied from fs/buffer.c:sys_fdatasync().
+ */
+static int fsg_lun_fsync_sub(struct fsg_lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+
+	if (curlun->ro || !filp)
+		return 0;
+	return vfs_fsync(filp, 1);
+}
+
+static void store_cdrom_address(u8 *dest, int msf, u32 addr)
+{
+	if (msf) {
+		/* Convert to Minutes-Seconds-Frames */
+		addr >>= 2;		/* Convert to 2048-byte frames */
+		addr += 2*75;		/* Lead-in occupies 2 seconds */
+		dest[3] = addr % 75;	/* Frames */
+		addr /= 75;
+		dest[2] = addr % 60;	/* Seconds */
+		addr /= 60;
+		dest[1] = addr;		/* Minutes */
+		dest[0] = 0;		/* Reserved */
+	} else {
+		/* Absolute sector */
+		put_unaligned_be32(addr, dest);
+	}
+}
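+
+/*
+ * Worked example for the MSF branch: addr = 30000 becomes 7500 frames
+ * after the shift, plus the 150-frame lead-in gives 7650, which encodes
+ * as 0 frames, 42 seconds, 1 minute: dest[] = { 0, 1, 42, 0 }.
+ */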
+
+
+/*-------------------------------------------------------------------------*/
+
+
+static ssize_t fsg_show_ro(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+
+	return sprintf(buf, "%d\n", fsg_lun_is_open(curlun)
+				  ? curlun->ro
+				  : curlun->initially_ro);
+}
+
+static ssize_t fsg_show_nofua(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+
+	return sprintf(buf, "%u\n", curlun->nofua);
+}
+
+static ssize_t fsg_show_file(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
+	char		*p;
+	ssize_t		rc;
+
+	down_read(filesem);
+	if (fsg_lun_is_open(curlun)) {	/* Get the complete pathname */
+		p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
+		if (IS_ERR(p))
+			rc = PTR_ERR(p);
+		else {
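+			/*
+			 * d_path() builds the pathname at the end of the
+			 * buffer, so move it to the front and append a
+			 * newline.
+			 */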
+			rc = strlen(p);
+			memmove(buf, p, rc);
+			buf[rc] = '\n';		/* Add a newline */
+			buf[++rc] = 0;
+		}
+	} else {				/* No file, return 0 bytes */
+		*buf = 0;
+		rc = 0;
+	}
+	up_read(filesem);
+	return rc;
+}
+
+static ssize_t fsg_show_inquiry(struct device *dev, struct device_attribute *attr, 
+				char *buf)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	return sprintf(buf, "%s\n", curlun->inquiry_string);
+}
+
+static ssize_t fsg_store_ro(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	ssize_t		rc;
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
+	unsigned	ro;
+
+	rc = kstrtouint(buf, 2, &ro);
+	if (rc)
+		return rc;
+
+	/*
+	 * Allow the write-enable status to change only while the
+	 * backing file is closed.
+	 */
+	down_read(filesem);
+	if (fsg_lun_is_open(curlun)) {
+		LDBG(curlun, "read-only status change prevented\n");
+		rc = -EBUSY;
+	} else {
+		curlun->ro = ro;
+		curlun->initially_ro = ro;
+		LDBG(curlun, "read-only status set to %d\n", curlun->ro);
+		rc = count;
+	}
+	up_read(filesem);
+	return rc;
+}
+
+static ssize_t fsg_store_nofua(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	unsigned	nofua;
+	int		ret;
+
+	ret = kstrtouint(buf, 2, &nofua);
+	if (ret)
+		return ret;
+
+	/* Sync data when switching from async mode to sync */
+	if (!nofua && curlun->nofua)
+		fsg_lun_fsync_sub(curlun);
+
+	curlun->nofua = nofua;
+
+	return count;
+}
+
+static ssize_t fsg_store_file(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
+	int		rc = 0;
+
+	if (curlun->prevent_medium_removal && fsg_lun_is_open(curlun)) {
+		LWARN(curlun, "eject attempt prevented\n");
+		return -EBUSY;				/* "Door is locked" */
+	}
+
+	/* Remove a trailing newline */
+	if (count > 0 && buf[count-1] == '\n')
+		((char *) buf)[count-1] = 0;		/* Ugh! */
+
+	/* Eject current medium */
+	down_write(filesem);
+	if (fsg_lun_is_open(curlun)) {
+		fsg_lun_close(curlun);
+		curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+	}
+
+	/* Load new medium */
+	if (count > 0 && buf[0]) {
+		rc = fsg_lun_open(curlun, buf);
+		if (rc == 0)
+			curlun->unit_attention_data =
+					SS_NOT_READY_TO_READY_TRANSITION;
+	}
+	up_write(filesem);
+	return (rc < 0 ? rc : count);
+}
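+
+/*
+ * Typical use of the "file" attribute from user space (illustrative only;
+ * the sysfs path depends on the platform and gadget configuration):
+ *
+ *   echo /data/storage.img > .../lunX/file    # load a backing file
+ *   echo "" > .../lunX/file                   # eject the current medium
+ */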
+
+
+static ssize_t fsg_store_inquiry(struct device *dev, struct device_attribute *attr, 
+				const char *buf, size_t count)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	
+	if (count >= sizeof(curlun->inquiry_string))
+		return -EINVAL;
+
+	memset(curlun->inquiry_string, 0, sizeof(curlun->inquiry_string));
+	memcpy(curlun->inquiry_string, buf, count);
+	return count;
+}
+
+static ssize_t fsg_show_cdrom(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+
+	return sprintf(buf, "%u\n", curlun->cdrom);
+}
+
+static ssize_t fsg_store_cdrom(struct device *dev, struct device_attribute *attr, 
+				const char *buf, size_t count)
+{
+	ssize_t		rc;
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
+	unsigned	cdrom;
+
+	rc = kstrtouint(buf, 2, &cdrom);
+	if (rc)
+		return rc;
+
+	/*
+	 * Allow the cdrom status to change only while the
+	 * backing file is closed.
+	 */
+	down_read(filesem);
+	if (fsg_lun_is_open(curlun)) {
+		LDBG(curlun, "cdrom status change prevented\n");
+		rc = -EBUSY;
+	} else {
+		curlun->cdrom = cdrom;
+		LDBG(curlun, "cdrom status set to %d\n", curlun->cdrom);
+		rc = count;
+	}
+	up_read(filesem);
+	return rc;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_diag.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_diag.c
new file mode 100755
index 0000000..4692269
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_diag.c
@@ -0,0 +1,692 @@
+/*
+ * u_diag.c -- Debuglog-over-USB link layer utilities for Gadget stack
+ *
+ * Copyright (C) 2019 ZTE Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+	 
+#include "u_diag.h"
+
+
+#define DIAG_MAX_NUM	1
+#define DIAG_TX_QUEUE_SIZE		16
+#define RX_QUEUE_SIZE		3
+
+#define DIAG_TX_DIRECTION 0
+#define DIAG_RX_DIRECTION 1
+#define DIAG_RX_MAX_PACKET_LEN 2048
+
+char g_diag_flag = 0;
+EXPORT_SYMBOL(g_diag_flag);
+
+//#define DIAG_DRV_TEST
+
+/*
+ * The port structure holds info for each port, one for each minor number
+ * (and thus for each /dev/ node).
+ */
+struct diag_port {
+	struct mutex	lock;			/* protect open/close */
+	spinlock_t		port_lock;	/* guard port_* access */
+
+	struct u_diag		*port_usb;
+	u8			port_num;
+
+	//struct list_head	read_pool;
+	struct list_head	write_pool;
+	struct usb_request *rx_req;
+
+	int write_started;
+	int write_allocated;
+
+	int connected;
+
+	usb_diag_rx_complete_callback rx_complete_callback;
+	usb_diag_tx_complete_callback tx_complete_callback;
+};
+
+struct diag_ports{
+	//u8 num;
+	struct diag_port * diag_port[DIAG_MAX_NUM];
+};
+
+static struct diag_ports n_diag_ports = {0};
+static int n_diag_num = 0;
+static unsigned	n_ports;
+
+#ifdef DIAG_DRV_TEST
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+struct semaphore diag_test_sem;
+#endif
+
+static unsigned diag_start_rx(struct diag_port *port);
+
+static void diag_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct diag_port	*port = ep->driver_data;
+	bool			disconnect = false;
+
+	/* Queue all received data until the tty layer is ready for it. */
+	spin_lock(&port->port_lock);
+	/*callback func*/
+	if (req->status == -ESHUTDOWN)
+		disconnect = true;
+	if (req->actual) {
+		/*
+		 * A zero-length completion may mean we stopped RX ourselves,
+		 * so only non-empty buffers are reported to the apps.
+		 */
+		if (port->rx_complete_callback) {
+			port->rx_complete_callback(req->buf, req->actual);
+			/* req->buf is freed by rx_complete_callback */
+			req->buf = NULL;
+		}
+	}
+
+	if (!disconnect && port->connected)
+		diag_start_rx(port);
+
+	spin_unlock(&port->port_lock);
+}
+
+volatile unsigned int u_diag_xfer_cnt = 0;
+volatile unsigned int u_diag_xfer_success_cnt = 0;
+volatile unsigned int u_diag_xfer_complete_cnt = 0;
+volatile unsigned int u_diag_xfer_complete_success_cnt = 0;
+static void diag_write_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct diag_port	*port = ep->driver_data;
+
+	spin_lock(&port->port_lock);
+	//list_add(&req->list, &port->write_pool);
+	list_add_tail(&req->list, &port->write_pool);
+	port->write_started--;
+
+	u_diag_xfer_complete_cnt++;
+
+	switch (req->status) {
+	default:
+		/* presumably a transient fault */
+		pr_warning("%s: unexpected %s status %d\n",
+				__func__, ep->name, req->status);
+		/* FALL THROUGH */
+	case 0:
+		/* normal completion */
+		//gs_start_tx(port);
+		if(port->tx_complete_callback) {
+			u_diag_xfer_complete_success_cnt++;
+			port->tx_complete_callback(req->buf);
+		}
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
+		break;
+	}
+
+	spin_unlock(&port->port_lock);
+}
+/*
+ * gdiag_free_req
+ *
+ * Free a usb_request and its buffer.
+ */
+void gdiag_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	if(req->buf) {
+		kfree(req->buf);
+		req->buf = NULL;
+	}
+	usb_ep_free_request(ep, req);
+}
+EXPORT_SYMBOL_GPL(gdiag_free_req);
+
+/* I/O glue between TTY (upper) and USB function (lower) driver layers */
+
+/*
+ * gdiag_alloc_req
+ *
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or NULL if there is an error.
+ */
+struct usb_request *
+gdiag_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+	if (req != NULL) {
+		if (len > 0) {
+			req->length = len;
+			req->buf = kmalloc(len, kmalloc_flags);
+			if (req->buf == NULL) {
+				pr_err("%s: no memory for request buffer\n",
+						__func__);
+				usb_ep_free_request(ep, req);
+				return NULL;
+			}
+		}
+	}
+
+	return req;
+}
+EXPORT_SYMBOL_GPL(gdiag_alloc_req);
+
+static void diag_free_requests(struct usb_ep *ep, struct list_head *head,
+							 int *allocated)
+{
+	struct usb_request	*req;
+
+	while (!list_empty(head)) {
+		req = list_entry(head->next, struct usb_request, list);
+		list_del(&req->list);
+		usb_ep_free_request(ep, req);
+		if (allocated)
+			(*allocated)--;
+	}
+}
+
+static int diag_alloc_requests(struct usb_ep *ep, struct list_head *head,
+		void (*fn)(struct usb_ep *, struct usb_request *),
+		int *allocated)
+{
+	int			i;
+	struct usb_request	*req;
+	int n = 0;
+#if 0
+	if(direction == DIAG_TX_DIRECTION)
+		n = allocated ? TX_QUEUE_SIZE - *allocated : TX_QUEUE_SIZE;
+	else
+		n = allocated ? RX_QUEUE_SIZE - *allocated : RX_QUEUE_SIZE;
+#else
+	n = allocated ? DIAG_TX_QUEUE_SIZE - *allocated : DIAG_TX_QUEUE_SIZE;
+#endif
+
+	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
+	 * do quite that many this time, don't fail ... we just won't
+	 * be as speedy as we might otherwise be.
+	 */
+	for (i = 0; i < n; i++) {
+		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+#if 0
+		if(direction == DIAG_RX_DIRECTION) {
+			if (req != NULL) {
+				req->length = DIAG_RX_MAX_PACKET_LEN;
+				req->buf = kmalloc(DIAG_RX_MAX_PACKET_LEN, GFP_ATOMIC);
+				if (req->buf == NULL) {
+					usb_ep_free_request(ep, req);
+					return NULL;
+				}
+			}
+		}
+#endif
+		if (!req)
+			return list_empty(head) ? -ENOMEM : 0;
+		req->complete = fn;
+		list_add_tail(&req->list, head);
+		if (allocated)
+			(*allocated)++;
+	}
+	return 0;
+}
+
+/*
+ * Context: caller owns port_lock, and port_usb is set
+ */
+static unsigned diag_start_rx(struct diag_port *port)
+/*
+__releases(&port->port_lock)
+__acquires(&port->port_lock)
+*/
+{
+#if 0//def RX_QUEUE_USED
+	struct list_head	*pool = &port->read_pool;
+	struct usb_ep		*out = port->port_usb->out;
+
+	while (!list_empty(pool)) {
+		struct usb_request	*req;
+		int			status;
+
+		if (port->read_started >= QUEUE_SIZE)
+			break;
+
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+		//req->length = out->maxpacket;
+		req->length = DIAG_RX_MAX_PACKET_LEN;
+		req->buf = kmalloc(DIAG_RX_MAX_PACKET_LEN, GFP_ATOMIC);
+
+		/* drop lock while we call out; the controller driver
+		 * may need to call us back (e.g. for disconnect)
+		 */
+		spin_unlock(&port->port_lock);
+		status = usb_ep_queue(out, req, GFP_ATOMIC);
+		spin_lock(&port->port_lock);
+
+		if (status) {
+			pr_debug("%s: %s %s err %d\n",
+					__func__, "queue", out->name, status);
+			list_add(&req->list, pool);
+			break;
+		}
+		port->read_started++;
+
+		/* abort immediately after disconnect */
+		if (!port->port_usb)
+			break;
+	}
+	return port->read_started;
+#else
+	struct usb_ep		*out = NULL;
+	struct usb_request	*req = port->rx_req;
+	int			status;
+	if (port->port_usb == NULL)
+		return 0;
+	out = port->port_usb->out;
+	if (req == NULL) {
+		port->rx_req = usb_ep_alloc_request(out, GFP_ATOMIC);
+		BUG_ON(port->rx_req == NULL);
+		req = port->rx_req;
+		req->length = DIAG_RX_MAX_PACKET_LEN;
+		req->complete = diag_read_complete;
+	}
+
+	/* GFP_KERNEL so the allocator may reclaim memory if needed */
+	if (req->buf == NULL)
+		req->buf = kzalloc(DIAG_RX_MAX_PACKET_LEN, GFP_KERNEL);
+
+	if (req->buf == NULL) {
+		usb_ep_free_request(out, req);
+		BUG_ON(1);
+	}
+
+	spin_unlock(&port->port_lock);
+	status = usb_ep_queue(out, req, GFP_ATOMIC);
+	spin_lock(&port->port_lock);
+#endif
+	return 1;
+}
+
+
+static int diag_start_io(struct diag_port *port)
+{
+	//struct list_head	*head = &port->read_pool;
+	//struct usb_ep		*ep = port->port_usb->out;
+	int			status;
+	unsigned		started;
+
+	/* Allocate RX and TX I/O buffers.  We can't easily do this much
+	 * earlier (with GFP_KERNEL) because the requests are coupled to
+	 * endpoints, as are the packet sizes we'll be using.  Different
+	 * configurations may use different endpoints with a given port;
+	 * and high speed vs full speed changes packet sizes too.
+	 */
+#if 0
+	status = diag_alloc_requests(port->port_usb->out, &port->read_pool, 
+			diag_read_complete, &port->read_allocated);
+	if (status)
+		return status;
+#endif
+
+	status = diag_alloc_requests(port->port_usb->in, &port->write_pool,
+			diag_write_complete, &port->write_allocated);
+
+	if (status) {
+		diag_free_requests(port->port_usb->in, &port->write_pool,
+			&port->write_allocated);
+		return status;
+	}
+
+	started = diag_start_rx(port);
+
+	/* unblock any pending writes into our circular buffer */
+	if (!started) {
+#if 0
+		diag_free_requests(port->port_usb->out, &port->read_pool, 
+			&port->read_allocated);
+#endif
+		diag_free_requests(port->port_usb->in, &port->write_pool,
+			&port->write_allocated);
+		status = -EIO;
+	}
+
+	return status;
+}
+
+static int
+diag_port_alloc(unsigned port_num)
+{
+	struct diag_port	*port;
+
+	port = kzalloc(sizeof(struct diag_port), GFP_KERNEL);
+	if (port == NULL)
+		return -ENOMEM;
+
+	spin_lock_init(&port->port_lock);
+
+	//INIT_LIST_HEAD(&port->read_pool);
+	//INIT_LIST_HEAD(&port->read_queue);
+	INIT_LIST_HEAD(&port->write_pool);
+
+	port->port_num = port_num;
+
+	n_diag_ports.diag_port[port_num] = port;
+
+	return 0;
+}
+
+
+int diag_do_xfer(void *buf, unsigned int len)
+{
+	struct diag_port	*port = n_diag_ports.diag_port[n_diag_num];
+	struct list_head	*pool = &port->write_pool;
+	struct usb_ep		*in = NULL;
+	int			status = 0;
+
+	u_diag_xfer_cnt++;
+	
+	if (!port->connected)
+		return -1;
+	if (port->port_usb == NULL)
+		return -1;
+#ifdef CONFIG_PM
+	if (port->port_usb->suspend_state == 1)
+		return -1;
+#endif
+	spin_lock(&port->port_lock);
+	if(port->port_usb)
+		in = port->port_usb->in;
+
+	if (!list_empty(&port->write_pool)) {
+		struct usb_request	*req;
+
+		if (port->write_started >= DIAG_TX_QUEUE_SIZE) {
+			spin_unlock(&port->port_lock);
+			return -1;
+		}
+		if((pool->next == NULL) || (pool->next == &port->write_pool)){
+			spin_unlock(&port->port_lock);
+			return -1;
+		}
+		
+		req = container_of(pool->next, struct usb_request, list);
+
+		req->buf = buf;
+		req->length = len;
+		
+		req->zero = ((len % in->maxpacket) == 0) ? 1: 0;
+
+		list_del_init(&req->list);
+
+		spin_unlock(&port->port_lock);
+		status = usb_ep_queue(in, req, GFP_ATOMIC);
+		spin_lock(&port->port_lock);
+
+		if (status) {
+			pr_debug("%s: %s %s err %d\n",
+					__func__, "queue", in->name, status);
+			if(port->port_usb && port->connected)
+				list_add(&req->list, &port->write_pool);
+			else
+				usb_ep_free_request(in, req);
+			spin_unlock(&port->port_lock);
+			return -1;
+		}
+
+		port->write_started++;
+
+	} else {
+		spin_unlock(&port->port_lock);
+		return -1;
+	}
+	
+	spin_unlock(&port->port_lock);
+
+	u_diag_xfer_success_cnt++;
+
+	return 0;
+}
+EXPORT_SYMBOL(diag_do_xfer);
+
+int diag_callback_register(usb_diag_rx_complete_callback rx_callback, 
+							usb_diag_tx_complete_callback tx_callback)
+{
+	struct diag_port	*port = n_diag_ports.diag_port[n_diag_num];
+	if(port == NULL)
+		return -1;
+	port->rx_complete_callback = rx_callback;
+	port->tx_complete_callback = tx_callback;
+
+	return 0;
+}
+EXPORT_SYMBOL(diag_callback_register);
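+
+/*
+ * Minimal usage sketch for a diag client (illustrative only; my_rx()/my_tx()
+ * are hypothetical callbacks owned by the caller):
+ *
+ *   static void my_rx(void *buf, unsigned int len) { ...; kfree(buf); }
+ *   static void my_tx(void *buf) { kfree(buf); }
+ *
+ *   diag_callback_register(my_rx, my_tx);
+ *   buf = kzalloc(len, GFP_ATOMIC);
+ *   if (diag_do_xfer(buf, len))
+ *           kfree(buf);          // not queued; caller keeps ownership
+ *
+ * On success the TX buffer is handed to my_tx() from diag_write_complete();
+ * RX buffers are handed to my_rx(), which must free them (see
+ * diag_read_complete()).
+ */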
+
+//test
+#ifdef DIAG_DRV_TEST
+volatile unsigned int g_test_rx_cb_cnt = 0;
+volatile char g_test_rx_buffer[50][50] = {0};
+volatile char g_test_rx_len[50] = {0};
+void test_rx_callback(void *buf, unsigned int len)
+{
+	//printk("test_rx_callback get data.\n");
+	memcpy(g_test_rx_buffer[g_test_rx_cb_cnt%50], buf, 50);
+	g_test_rx_len[g_test_rx_cb_cnt%50] = len;
+	g_test_rx_cb_cnt++;
+	kfree(buf);
+
+	up(&diag_test_sem);
+}
+
+volatile unsigned int g_test_tx_cb_cnt = 0;
+void test_tx_callback(void *buf)
+{
+	g_test_tx_cb_cnt++;
+	kfree(buf);
+}
+
+volatile unsigned int g_test_thread_cnt = 0;
+static int test_tx_thread(void *data)
+{
+	struct sched_param param = { .sched_priority = 15 };
+	void *buf1 = NULL;
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+
+	while (!kthread_should_stop()) {
+		down(&diag_test_sem);
+		g_test_thread_cnt++;
+		buf1 = kzalloc(1024, GFP_ATOMIC);
+		if (!buf1)
+			continue;
+		memset(buf1, g_test_rx_cb_cnt, 1024);
+		if (diag_do_xfer(buf1, 1000))
+			kfree(buf1);	/* not queued, free it here */
+	}
+	return 0;
+}
+#endif
+void diag_connect_ext(struct u_diag *diag)
+{
+	struct diag_port	*port = diag->ioport;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	port->connected = 1;
+	g_diag_flag = 1;
+	spin_unlock_irqrestore(&port->port_lock, flags);	
+
+}
+
+int diag_connect(struct u_diag *diag, u8 port_num)
+{
+	struct diag_port	*port = n_diag_ports.diag_port[port_num];
+	unsigned long	flags;
+	int		status;
+	
+	n_diag_num = port_num;
+
+	/* activate the endpoints */
+	status = usb_ep_enable(diag->in);
+	if (status < 0)
+		return status;
+	diag->in->driver_data = port;
+
+	status = usb_ep_enable(diag->out);
+	if (status < 0)
+		return status;
+	diag->out->driver_data = port;
+	
+	spin_lock_irqsave(&port->port_lock, flags);
+	diag->ioport = port;
+	port->port_usb = diag;
+
+	port->connected = 1;
+	g_diag_flag = 1;
+	diag_start_io(port);
+
+#if 0
+	/* if it's already open, start I/O ... and notify the serial
+	 * protocol about open/close status (connect/disconnect).
+	 */
+	if (port->open_count) {
+		diag_start_io(port);
+		if (diag->connect)
+			diag->connect(diag);
+	} else {
+		if (diag->disconnect)
+			diag->disconnect(diag);
+	}
+#endif
+
+#ifdef DIAG_DRV_TEST
+	//test
+	diag_callback_register(test_rx_callback, test_tx_callback);
+	sema_init(&diag_test_sem, 0);
+	kthread_run(test_tx_thread, NULL, "diag_test_thread/%s", "0");
+#endif
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return status;
+}
+
+void diag_disconnect_ext(struct u_diag *diag)
+{
+	struct diag_port	*port = diag->ioport;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	g_diag_flag = 0;
+	port->connected = 0;
+
+	spin_unlock_irqrestore(&port->port_lock, flags);	
+
+}
+
+int diag_disconnect(struct u_diag *diag)
+{
+	struct diag_port	*port = diag->ioport;
+	unsigned long	flags;
+
+	if (!port)
+		return 0;
+	
+	g_diag_flag = 0;
+	port->connected = 0;
+
+	/* disable endpoints, aborting down any active I/O */
+	usb_ep_disable(diag->out);
+	diag->out->driver_data = NULL;
+
+	usb_ep_disable(diag->in);
+	diag->in->driver_data = NULL;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/* REVISIT as above: how best to track this? */
+	port->port_usb = NULL;
+	diag->ioport = NULL;
+	if(port->rx_req){
+		if(port->rx_req->buf){
+			kfree(port->rx_req->buf);
+			port->rx_req->buf = NULL;
+		}
+		usb_ep_free_request(diag->out, port->rx_req);
+		port->rx_req = NULL;
+	}	
+	/* finally, free any unused/unusable I/O buffers */
+	diag_free_requests(diag->in, &port->write_pool,
+			&port->write_allocated);
+
+	port->write_allocated = port->write_started = 0;
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	return 1;
+}
+
+int diag_setup(struct usb_gadget *g, unsigned count)
+{
+	unsigned			i;
+	int				status;
+
+	if (count == 0 || count > DIAG_MAX_NUM)
+		return -EINVAL;
+
+	/* make devices be openable */
+	for (i = 0; i < count; i++) {
+		status = diag_port_alloc(i);
+		if (status) {
+			count = i;
+			goto fail;
+		}
+		mutex_init(&n_diag_ports.diag_port[i]->lock);
+	}
+
+	n_ports = count;
+
+	return status;
+fail:
+	while (count--)
+		kfree(n_diag_ports.diag_port[count]);
+	return status;
+}
+
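+/*
+ * Expected call flow for the function driver (illustrative; mirrors the
+ * u_serial pattern rather than anything mandated by this file):
+ *
+ *   diag_setup(gadget, 1);      // at gadget bind
+ *   diag_connect(diag, 0);      // when the diag alt setting is selected
+ *   diag_disconnect(diag);      // on disable/disconnect
+ *   diag_cleanup();             // at gadget unbind
+ */
+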
+/**
+ * diag_cleanup - remove diag driver and devices
+ * Context: may sleep
+ *
+ * This is called to free all resources allocated by @diag_setup().
+ * Accordingly, it may need to wait until some open /dev/ files have
+ * closed.
+ *
+ * The caller must have issued @diag_disconnect() for any ports
+ * that had previously been connected, so that there is never any
+ * I/O pending when it's called.
+ */
+void diag_cleanup(void)
+{
+	unsigned	i;
+	struct diag_port	*port;
+
+	for (i = 0; i < n_ports; i++) {
+		/* prevent new opens */
+		mutex_lock(&n_diag_ports.diag_port[i]->lock);
+		port = n_diag_ports.diag_port[i];
+		n_diag_ports.diag_port[i] = NULL;
+		mutex_unlock(&port->lock);
+
+		WARN_ON(port->port_usb != NULL);
+
+		kfree(port);
+	}
+}
+
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_diag.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_diag.h
new file mode 100644
index 0000000..4585e3b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_diag.h
@@ -0,0 +1,39 @@
+/*
+ * u_diag.h -- interface to USB gadget "wincomm log" utilities
+ *
+ * Copyright (C) 2019 ZTE Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __U_DIAG_H
+#define __U_DIAG_H
+
+typedef void (*usb_diag_rx_complete_callback)(void *buf, unsigned int len);
+typedef void (*usb_diag_tx_complete_callback)(void *buf);
+
+struct u_diag {
+	struct usb_function		func;
+
+	/* port is managed by diag_{connect,disconnect} */
+	struct diag_port			*ioport;
+
+	struct usb_ep			*in;
+	struct usb_ep			*out;
+
+	/* REVISIT avoid this CDC-ACM support harder ... */
+	struct usb_cdc_line_coding port_line_coding;	/* 9600-8-N-1 etc */
+
+	/* notification callbacks (currently unused by u_diag.c) */
+	void (*connect)(struct u_diag *p);
+	void (*disconnect)(struct u_diag *p);
+	int (*send_break)(struct u_diag *p, int duration);
+#ifdef CONFIG_PM
+	u32 suspend_state;
+#endif
+};
+
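+/*
+ * Prototypes mirroring the definitions in u_diag.c (listed here for
+ * reference; the function driver may declare them elsewhere as well):
+ */
+struct usb_request *gdiag_alloc_req(struct usb_ep *ep, unsigned len,
+		gfp_t kmalloc_flags);
+void gdiag_free_req(struct usb_ep *ep, struct usb_request *req);
+int diag_callback_register(usb_diag_rx_complete_callback rx_callback,
+		usb_diag_tx_complete_callback tx_callback);
+int diag_do_xfer(void *buf, unsigned int len);
+int diag_connect(struct u_diag *diag, u8 port_num);
+void diag_connect_ext(struct u_diag *diag);
+int diag_disconnect(struct u_diag *diag);
+void diag_disconnect_ext(struct u_diag *diag);
+int diag_setup(struct usb_gadget *g, unsigned count);
+void diag_cleanup(void);
+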
+#endif /* __U_DIAG_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_ether.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_ether.c
new file mode 100755
index 0000000..c910279
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_ether.c
@@ -0,0 +1,1876 @@
+/*
+ * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <net/SI/ext_mem.h>
+
+
+#include "u_ether.h"
+#include "multi_packet.h"
+#include <mach/highspeed_debug.h>
+#include <linux/nvro_comm.h>
+/*
+ * This component encapsulates the Ethernet link glue needed to provide
+ * one (!) network link through the USB gadget stack, normally "usb0".
+ *
+ * The control and data models are handled by the function driver which
+ * connects to this code; such as CDC Ethernet (ECM or EEM),
+ * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
+ * management.
+ *
+ * Link level addressing is handled by this component using module
+ * parameters; if no such parameters are provided, random link level
+ * addresses are used.  Each end of the link uses one address.  The
+ * host end address is exported in various ways, and is often recorded
+ * in configuration databases.
+ *
+ * The driver which assembles each configuration using such a link is
+ * responsible for ensuring that each configuration includes at most one
+ * instance of is network link.  (The network layer provides ways for
+ * this single "physical" link to be used by multiple virtual links.)
+ */
+
+#define UETH__VERSION	"29-May-2008"
+
+#define ETH_MAX_NUM	4
+
+extern int (*fast_from_driver)(struct sk_buff *skb, struct net_device* dev);
+
+#if 0  //remove to u_ether.h
+struct eth_dev {
+	/* lock is held while accessing port_usb
+	 * or updating its backlink port_usb->ioport
+	 */
+	spinlock_t		lock;
+	struct gether		*port_usb;
+
+	struct net_device	*net;
+	struct usb_gadget	*gadget;
+
+	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
+	struct list_head	tx_reqs, rx_reqs;
+	atomic_t		tx_qlen;
+
+	struct sk_buff_head	rx_frames;
+
+	unsigned		header_len;
+	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
+	int			(*unwrap)(struct gether *,
+						struct sk_buff *skb,
+						struct sk_buff_head *list);
+
+	struct work_struct	work;
+
+	struct work_struct inform_work;	 /* inform USB netCard state( online or offline ) */
+	int		eth_state; /* 0: offline,  1:online */
+	
+	unsigned long		todo;
+#define	WORK_RX_MEMORY		0
+
+	bool			zlp;
+	u8			host_mac[ETH_ALEN];
+};
+#endif
+
+struct eth_devs{
+	//u8 num;
+	struct eth_dev * eth_dev[ETH_MAX_NUM];
+};
+unsigned long long g_test_xmit_pktnum = 0;
+unsigned long long g_test_xmit_pkterr1 = 0;
+unsigned long long g_test_xmit_pkterr2 = 0;
+unsigned long long g_test_xmit_pkterr3 = 0;
+unsigned long long g_test_rx_pkt = 0;
+unsigned long long g_test_rx_complt_pkt = 0;
+
+/*-------------------------------------------------------------------------*/
+
+#define RX_EXTRA	20	/* bytes guarding against rx overflows */
+
+#define DEFAULT_QLEN	2	/* double buffering by default */
+
+
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+
+static unsigned qmult = 5;
+module_param(qmult, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
+
+#else	/* full speed (low speed doesn't do bulk) */
+//#define qmult		512
+static unsigned int qmult = 512;
+#define TX_QMULT    128//512
+#endif
+
+/* for dual-speed hardware, use deeper queues at high/super speed */
+static inline int qlen(struct usb_gadget *gadget)
+{
+	USBSTACK_DBG("usb ether buf num:%d, skbnum:%d, usbSpeed:%d", qmult * DEFAULT_QLEN, gether_ether_skb_num(), gadget->speed);
+#if 0
+	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
+					    gadget->speed == USB_SPEED_SUPER)){
+		qmult = gether_ether_skb_num();
+		return qmult * DEFAULT_QLEN;
+	}
+	else{
+#endif		
+		qmult = gether_ether_skb_num();
+		return qmult * DEFAULT_QLEN;
+	//}
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* REVISIT there must be a better way than having two sets
+ * of debug calls ...
+ */
+
+#undef DBG
+#undef VDBG
+#undef ERROR
+#undef INFO
+
+#define xprintk(d, level, fmt, args...) \
+	printk(level "%s: " fmt , (d)->net->name , ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DBG(dev, fmt, args...) \
+	xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VDBG	DBG
+#else
+#define VDBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#define ERROR(dev, fmt, args...) \
+	xprintk(dev , KERN_ERR , fmt , ## args)
+#define INFO(dev, fmt, args...) \
+	xprintk(dev , KERN_INFO , fmt , ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+
+static int ueth_change_mtu(struct net_device *net, int new_mtu)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+	unsigned long	flags;
+	int		status = 0;
+
+	/* don't change MTU on "live" link (peer won't know) */
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb)
+		status = -EBUSY;
+	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
+		status = -ERANGE;
+	else
+		net->mtu = new_mtu;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return status;
+}
+
+static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+
+	strlcpy(p->driver, "g_ether", sizeof p->driver);
+	strlcpy(p->version, UETH__VERSION, sizeof p->version);
+	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
+	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
+}
+
+/* REVISIT can also support:
+ *   - WOL (by tracking suspends and issuing remote wakeup)
+ *   - msglevel (implies updated messaging)
+ *   - ... probably more ethtool ops
+ */
+
+static const struct ethtool_ops ops = {
+	.get_drvinfo = eth_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+};
+
+static void defer_kevent(struct eth_dev *dev, int flag)
+{
+	if (test_and_set_bit(flag, &dev->todo))
+		return;
+	if (!schedule_work(&dev->work))
+		ERROR(dev, "kevent %d may have been dropped\n", flag);
+	else
+		USBSTACK_DBG("kevent %d scheduled\n", flag);
+}
+
+static void rx_complete(struct usb_ep *ep, struct usb_request *req);
+void u_ether_rx_vnic_packet_list(void);
+void u_ether_tx_vnic_packet_list(void);
+static void  mbim_loop_test_rx_callback(struct usb_ep *ep, struct usb_request *req);
+static void  mbim_loop_test_tx_callback(struct usb_ep *ep, struct usb_request *req) ;
+
+
+static void test_rx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	mbim_loop_test_rx_callback(ep, req);
+}
+
+static void test_tx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	mbim_loop_test_tx_callback(ep, req);
+}
+
+
+static int
+rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+{
+	struct sk_buff	*skb;
+	int		retval = -ENOMEM;
+	size_t		size = 0;
+	struct usb_ep	*out;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb)
+		out = dev->port_usb->out_ep;
+	else
+		out = NULL;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (!out)
+		return -ENOTCONN;
+
+
+	/* Padding up to RX_EXTRA handles minor disagreements with host.
+	 * Normally we use the USB "terminate on short read" convention;
+	 * so allow up to (N*maxpacket), since that memory is normally
+	 * already allocated.  Some hardware doesn't deal well with short
+	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
+	 * byte off the end (to force hardware errors on overflow).
+	 *
+	 * RNDIS uses internal framing, and explicitly allows senders to
+	 * pad to end-of-packet.  That's potentially nice for speed, but
+	 * means receivers can't recover lost synch on their own (because
+	 * new packets don't only start after a short RX).
+	 */
+	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
+	size += dev->port_usb->header_len;
+#if 0
+/***  cancel 512Bytes align, per SKB Bytes form 4K Bytes to 2K Bytes ***/
+	size += out->maxpacket - 1;
+	size -= size % out->maxpacket;
+#endif
+	if (dev->port_usb->is_fixed)
+		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
+
+	//skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
+
+	skb = dev_alloc_skb(size);
+	if (skb == NULL) {
+		DBG(dev, "no rx skb\n");
+		goto enomem;
+	}
+
+	/* Some platforms perform better when IP packets are aligned,
+	 * but on at least one, checksumming fails otherwise.  Note:
+	 * RNDIS headers involve variable numbers of LE32 values.
+	 */
+	skb_reserve(skb, NET_IP_ALIGN);
+
+	req->buf = skb->data;
+	req->length = size;
+	req->context = skb;
+	req->status = 0;
+	if (atomic_read(&dev->work_mode))
+		req->complete = test_rx_complete;	/* loopback test mode */
+	else
+		req->complete = rx_complete;
+
+	//if(g_VNIC_MultiPacket_MaxNum ==1 )
+	//	retval = usb_ep_queue(out, req, GFP_ATOMIC);
+	//else	
+#if 1
+		retval = multi_packet_rx_queue(out, req, GFP_ATOMIC);
+		g_test_rx_pkt++;
+#else
+	if(strstr(dev->net->name, "rndis"))
+	{
+		retval = multi_packet_rx_queue(out, req, GFP_ATOMIC);
+	}	
+	else
+	{
+		retval = usb_ep_queue(out, req, GFP_ATOMIC);
+	}
+#endif
+	if (retval == -ENOMEM && (atomic_read(&dev->work_mode) == 0)){
+enomem:
+		defer_kevent(dev, WORK_RX_MEMORY);
+	}
+	if (retval) {
+		USBSTACK_DBG("rx submit --> %d\n", retval);
+		if (skb)
+			dev_kfree_skb_any(skb);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		list_add_tail(&req->list, &dev->rx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	return retval;
+}
+
+int g_net_rx_cnt = 0;
+extern int get_vnic_multi_packet_num(void);
+static void rx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct sk_buff	*skb = req->context, *skb2;
+	struct eth_dev	*dev = ep->driver_data;
+	int		status = req->status;
+	int		packet_num;
+	unsigned long	flags;
+
+	if (atomic_read(&dev->work_mode) == 1) {
+		printk("rx_complete, loopback mode\n");
+		mbim_loop_test_rx_callback(ep, req);
+		return;
+	}
+	/* packet_num is queried but not otherwise used here */
+	packet_num = get_vnic_multi_packet_num();
+	g_test_rx_complt_pkt++;
+	switch (status) {
+
+	/* normal completion */
+	case 0:
+		skb_put(skb, req->actual);
+
+		if (dev->unwrap) {
+			unsigned long	flags;
+
+			spin_lock_irqsave(&dev->lock, flags);
+			if (dev->port_usb) {
+				status = dev->unwrap(dev->port_usb,
+							skb,
+							&dev->rx_frames);
+			} else {
+				dev_kfree_skb_any(skb);
+				status = -ENOTCONN;
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+		} else {
+			skb_queue_tail(&dev->rx_frames, skb);
+		}
+		
+		skb = NULL;
+
+		skb2 = skb_dequeue(&dev->rx_frames);
+		while (skb2) {
+			if (status < 0
+					|| ETH_HLEN > skb2->len
+					|| skb2->len > ETH_FRAME_LEN) {
+				dev->net->stats.rx_errors++;
+				dev->net->stats.rx_length_errors++;
+				DBG(dev, "rx length %d\n", skb2->len);
+				dev_kfree_skb_any(skb2);
+				goto next_frame;
+			}
+#if 1
+		if((strstr(dev->port_usb->func.name, "rndis"))||
+			(strstr(dev->port_usb->func.name, "mbim")) ||
+			(strstr(dev->port_usb->func.name, "cdc_ethernet")) ){
+	            if (fast_from_driver && fast_from_driver(skb2, dev->net))
+	            {
+	//                print_sun1("<--- usb fastnat okokokokok !!!! \n");
+	                goto next_frame;
+	            }
+		}
+#else
+		if(strstr(dev->port_usb->func.name, "rndis")){
+			if (fast_from_driver && fast_from_driver(skb2, dev->net))
+			{
+//                print_sun1("<--- usb fastnat okokokokok !!!! \n");
+
+				goto next_frame;
+			}
+		} else if(strstr(dev->port_usb->func.name, "cdc_ethernet")){
+			
+			if(((g_net_rx_cnt++)% 41) != 0) {
+				if (fast_from_driver && fast_from_driver(skb2, dev->net))
+				{
+	//                print_sun1("<--- usb fastnat okokokokok !!!! \n");
+
+					goto next_frame;
+				}
+			}
+			
+		}
+#endif
+//			print_sun1("<--- usb fastnat failed !!!! \n");
+
+			skb2->protocol = eth_type_trans(skb2, dev->net);
+			dev->net->stats.rx_packets++;
+			dev->net->stats.rx_bytes += skb2->len;
+
+			/* no buffer copies needed, unless hardware can't
+			 * use skb buffers.
+			 */
+#if 0
+			usb_dbg_printf("usb ether Send the packet\n");
+			status = netif_rx(skb2);
+#else
+			/* Send the packet */
+			if (in_interrupt()) {
+				netif_rx(skb2);
+			} else {
+				netif_rx_ni(skb2);
+			}
+#endif
+
+next_frame:
+			skb2 = skb_dequeue(&dev->rx_frames);
+		}
+		break;
+
+	/* software-driven interface shutdown */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		VDBG(dev, "rx shutdown, code %d\n", status);
+		goto quiesce;
+
+	/* for hardware automagic (such as pxa) */
+	case -ECONNABORTED:		/* endpoint reset */
+		DBG(dev, "rx %s reset\n", ep->name);
+		defer_kevent(dev, WORK_RX_MEMORY);
+quiesce:
+		dev_kfree_skb_any(skb);
+		goto clean;
+
+	/* data overrun */
+	case -EOVERFLOW:
+		dev->net->stats.rx_over_errors++;
+		/* FALLTHROUGH */
+
+	default:
+		dev->net->stats.rx_errors++;
+		DBG(dev, "rx status %d\n", status);
+		break;
+	}
+
+	if (skb)
+		dev_kfree_skb_any(skb);
+	if (!netif_running(dev->net)) {
+clean:
+		spin_lock_irqsave(&dev->req_lock, flags);
+		list_add_tail(&req->list, &dev->rx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+		req = NULL;
+	}
+	if (req)
+		rx_submit(dev, req, GFP_ATOMIC);
+}
+
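+/*
+ * Top the request list up to @n entries: allocate any that are missing and
+ * free any extras beyond @n.
+ */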
+static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
+{
+	unsigned		i;
+	struct usb_request	*req;
+
+	if (!n)
+		return -ENOMEM;
+
+	/* queue/recycle up to N requests */
+	i = n;
+	list_for_each_entry(req, list, list) {
+		if (i-- == 0)
+			goto extra;
+	}
+	while (i--) {
+		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+		if (!req)
+			return list_empty(list) ? -ENOMEM : 0;
+		list_add_tail(&req->list, list);
+	}
+	return 0;
+
+extra:
+	/* free extras */
+	for (;;) {
+		struct list_head	*next;
+
+		next = req->list.next;
+		list_del(&req->list);
+		usb_ep_free_request(ep, req);
+
+		if (next == list)
+			break;
+
+		req = container_of(next, struct usb_request, list);
+	}
+	return 0;
+}
+
+static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
+{
+	int	status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	status = prealloc(&dev->tx_reqs, link->in_ep, (TX_QMULT*DEFAULT_QLEN));
+	if (status < 0)
+		goto fail;
+	status = prealloc(&dev->rx_reqs, link->out_ep, n);
+	if (status < 0)
+		goto fail;
+	goto done;
+fail:
+	DBG(dev, "can't alloc requests\n");
+done:
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+	return status;
+}
+
+void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	struct usb_request	*req;
+	unsigned long		flags;
+    unsigned int        skb_times = 0;
+	
+	/* fill unused rxq slots with some skb */
+	spin_lock_irqsave(&dev->req_lock, flags);
+	while (!list_empty(&dev->rx_reqs)) {
+		skb_times++;
+		req = container_of(dev->rx_reqs.next,
+				struct usb_request, list);
+		list_del_init(&req->list);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+
+		if (rx_submit(dev, req, gfp_flags) < 0) {
+			if(atomic_read(&dev->work_mode) == 0)
+				defer_kevent(dev, WORK_RX_MEMORY);
+			return;
+		}
+
+		spin_lock_irqsave(&dev->req_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+	USBSTACK_DBG("%s, %u skbtime:%d, qmult:%d, skbnum:%d", __func__, __LINE__, skb_times, qmult, gether_ether_skb_num());
+}
+
+static void eth_work(struct work_struct *work)
+{
+	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
+
+	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
+		if (netif_running(dev->net))
+			rx_fill(dev, GFP_KERNEL);
+	}
+
+	if (dev->todo)
+		USBSTACK_DBG("work done, flags = 0x%lx\n", dev->todo);
+}
+
+static void eth_inform_work(struct work_struct *inform_work)
+{
+	struct eth_dev	*dev = container_of(inform_work, struct eth_dev, inform_work);
+	
+	USBSTACK_DBG("%s --Inform USB net state: %d, reset:%d\n", __func__, dev->eth_state, dev->net_func_reset);
+	
+	if (dev->eth_state == dev->work_state) {
+		printk("%s:%d eth_state %d already informed\n",
+				__func__, __LINE__, dev->eth_state);
+		USBSTACK_DBG("%s:%d already informed", __func__, __LINE__);
+		return;
+	}
+	if (dev->net_func_reset == 1) {
+		dev->net_func_reset = 0;
+		kobject_uevent(&dev->net->dev.kobj, KOBJ_CHANGE);
+	}
+
+	if(dev->eth_state == 1){
+		kobject_uevent(&dev->net->dev.kobj, KOBJ_ONLINE);
+		dev->work_state = 1;
+	}
+	else{
+		dev->work_state = 0;
+		kobject_uevent(&dev->net->dev.kobj, KOBJ_OFFLINE);
+	}
+
+	usb_printk("%s:%d Ether uevent %d %s\n", __func__, __LINE__, (dev->eth_state), (dev->eth_state) ? "ONLINE":"OFFLINE");
+	USBSTACK_DBG("Inform USB net state: %d", dev->eth_state);
+}
+
+static void tx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	unsigned long			flags;
+
+	struct sk_buff	*skb = req->context;
+	struct eth_dev	*dev = ep->driver_data;
+
+	switch (req->status) {
+	default:
+		dev->net->stats.tx_errors++;
+		VDBG(dev, "tx err %d\n", req->status);
+		/* FALLTHROUGH */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		break;
+	case 0:
+		dev->net->stats.tx_bytes += skb->len;
+	}
+	dev->net->stats.tx_packets++;
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	list_add_tail(&req->list, &dev->tx_reqs);
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+	dev_kfree_skb_any(skb);
+
+	atomic_dec(&dev->tx_qlen);
+	if (netif_carrier_ok(dev->net))
+		netif_wake_queue(dev->net);
+}
+
+static inline int is_promisc(u16 cdc_filter)
+{
+	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
+}
+
+static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
+					struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+	int			length = skb->len;
+	int			retval;
+	struct usb_request	*req = NULL;
+	unsigned long		flags;
+	struct usb_ep		*in;
+	u16			cdc_filter;
+	//printk("eth_start_xmit,eth_state:%d, length:%d\n",dev->eth_state, length);
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb) {
+		in = dev->port_usb->in_ep;
+		cdc_filter = dev->port_usb->cdc_filter;
+	} else {
+		in = NULL;
+		cdc_filter = 0;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+    g_test_xmit_pktnum++;
+	if (!in) {
+		dev_kfree_skb_any(skb);
+		g_test_xmit_pkterr1++;
+		return NETDEV_TX_OK;
+	}
+
+	/* apply outgoing CDC or RNDIS filters */
+	if (!is_promisc(cdc_filter)) {
+		u8		*dest = skb->data;
+
+		if (is_multicast_ether_addr(dest)) {
+			u16	type;
+
+			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
+			 * SET_ETHERNET_MULTICAST_FILTERS requests
+			 */
+			if (is_broadcast_ether_addr(dest))
+				type = USB_CDC_PACKET_TYPE_BROADCAST;
+			else
+				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
+			if (!(cdc_filter & type)) {
+				dev_kfree_skb_any(skb);
+				return NETDEV_TX_OK;
+			}
+		}
+		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
+	}
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	/*
+	 * this freelist can be empty if an interrupt triggered disconnect()
+	 * and reconfigured the gadget (shutting down this queue) after the
+	 * network stack decided to xmit but before we got the spinlock.
+	 */
+	if (dev->eth_state == 0) {
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+		g_test_xmit_pkterr3++;
+		dev_kfree_skb_any(skb);
+		return NET_XMIT_DROP;
+	}
+
+	if (list_empty(&dev->tx_reqs)
+#ifdef CONFIG_PM
+		||((dev->port_usb->suspend_state == 1) && (!strstr(dev->port_usb->func.name, "mbim")))
+#endif
+		) {
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+        g_test_xmit_pkterr2++;
+		dev_kfree_skb_any(skb);
+		return NET_XMIT_DROP;
+	}
+#if 1
+#ifdef CONFIG_PM
+	if(strstr(dev->port_usb->func.name, "mbim")){
+		if(dev->port_usb->suspend_state == 1 && (atomic_read(&dev->port_usb->wake_state) == 0)){
+			
+			struct usb_gadget *gadget = dev->port_usb->func.config->cdev->gadget;
+			atomic_set(&dev->port_usb->wake_state, 1);
+			spin_unlock_irqrestore(&dev->req_lock, flags);
+			//if(dev->port_usb->func.config && dev->port_usb->func.config->cdev && dev->port_usb->func.config->cdev->gadget) {
+				printk("\n---eth_start_xmit, call usb_gadget_wakeup\n");
+			if(gadget){
+			    usb_gadget_wakeup(dev->port_usb->func.config->cdev->gadget);
+			} else{
+		        g_test_xmit_pkterr2++;
+				dev_kfree_skb_any(skb);
+				return NET_XMIT_DROP;			
+			}
+			atomic_set(&dev->port_usb->wake_state, 0);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		}
+	}
+#endif	
+#endif
+	req = container_of(dev->tx_reqs.next, struct usb_request, list);
+	list_del(&req->list);
+
+	/* temporarily stop TX queue when the freelist empties */
+	if (list_empty(&dev->tx_reqs))
+		netif_stop_queue(net);
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+
+	/* no buffer copies needed, unless the network stack did it
+	 * or the hardware can't use skb buffers.
+	 * or there's not enough space for extra headers we need
+	 */
+	if (dev->wrap) {
+		unsigned long	flags;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (dev->port_usb)
+			skb = dev->wrap(dev->port_usb, skb);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		if (!skb)
+			goto drop;
+
+		length = skb->len;
+	}
+	skb = flush_skbuf(skb);
+	req->buf = skb->data;
+	req->dma = virtaddr_to_phys(skb->data);
+	req->context = skb;
+	req->complete = tx_complete;
+	if (dev->port_usb == NULL) {
+		dev->net->stats.tx_dropped++;
+		/* port_usb is gone; return the request to the free list */
+		spin_lock_irqsave(&dev->req_lock, flags);
+		list_add_tail(&req->list, &dev->tx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+		dev_kfree_skb_any(skb);
+		return NET_XMIT_DROP;
+	}
+	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
+	if (dev->port_usb->is_fixed &&
+	    length == dev->port_usb->fixed_in_len &&
+	    (length % in->maxpacket) == 0)
+		req->zero = 0;
+	else
+		req->zero = 1;
+
+	/* use zlp framing on tx for strict CDC-Ether conformance,
+	 * though any robust network rx path ignores extra padding.
+	 * and some hardware doesn't like to write zlps.
+	 */
+	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
+		length++;
+
+	req->length = length;
+
+	/* throttle high/super speed IRQ rate back slightly */
+	if (gadget_is_dualspeed(dev->gadget))
+		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
+				     dev->gadget->speed == USB_SPEED_SUPER)
+			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
+			: 0;
+#if 0
+    if (dev->port_usb->suspend_state == 1){
+		usb_gadget_wakeup(dev->gadget);
+		do{
+			msleep(2);
+		}while(dev->port_usb->suspend_state==1);
+	}
+#endif
+	//if(g_VNIC_MultiPacket_MaxNum ==1 )
+	//	retval = usb_ep_queue(in, req, GFP_ATOMIC);
+	//else
+	#if 1
+		retval = multi_packet_tx_queue(in, req, GFP_ATOMIC);
+	#else
+	if(strstr(net->name, "rndis"))
+	{
+		retval = multi_packet_tx_queue(in, req, GFP_ATOMIC);
+	}	
+	else
+	{
+		USBSTACK_DBG("usb_ep_queue");
+		retval = usb_ep_queue(in, req, GFP_ATOMIC);
+	}
+	#endif	
+	switch (retval) {
+	default:
+		DBG(dev, "tx queue err %d\n", retval);
+		break;
+	case 0:
+		net->trans_start = jiffies;
+		atomic_inc(&dev->tx_qlen);
+	}
+
+	if (retval) {
+		dev_kfree_skb_any(skb);
+drop:
+		dev->net->stats.tx_dropped++;
+		spin_lock_irqsave(&dev->req_lock, flags);
+		if (list_empty(&dev->tx_reqs))
+			netif_start_queue(net);
+		list_add_tail(&req->list, &dev->tx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	return NETDEV_TX_OK;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	USBSTACK_DBG("%s", __func__);
+	//DBG(dev, "%s\n", __func__);
+	//if(strstr(dev->net->name, "rndis"))
+	{
+		multi_packet_activate();
+	}
+
+	/* fill the rx queue */
+	rx_fill(dev, gfp_flags);
+
+	/* and open the tx floodgates */
+	atomic_set(&dev->tx_qlen, 0);
+	netif_wake_queue(dev->net);
+}
+
+static int eth_open(struct net_device *net)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+	struct gether	*link;
+	int rtv;
+	USBSTACK_DBG("%s", __func__);
+	//DBG(dev, "%s\n", __func__);
+
+    if (dev->eth_state == 0){
+        USBSTACK_DBG("%s, %u eth_state:%d", __func__, __LINE__, dev->eth_state);
+        return -EAGAIN;
+    }
+	
+	if (dev->port_usb == NULL)
+		return -ENODEV;
+
+#if MULTIPACKET_BUF_ALLOC
+    if(strstr(dev->port_usb->func.name, "rndis"))
+    {
+        struct f_rndis		*rndis = func_to_rndis(&dev->port_usb->func);
+    	if(rndis->state != 2)
+        {	
+    		rtv = multi_packet_buf_alloc();
+    		if(rtv < 0)
+            {
+    			printk("eth_open, net is rndis and req alloc faild with no memory\n");
+    			return rtv;
+    		}
+    		rndis->state = 2;
+    	}        
+    }
+    else if(strstr(dev->port_usb->func.name, "mbim"))
+    {
+        struct f_mbim		*mbim = func_to_mbim(&dev->port_usb->func);
+    	if(mbim->state != 2)
+        {	
+    		rtv = multi_packet_buf_alloc();
+    		if(rtv < 0)
+            {
+    			printk("eth_open, net is mbim and req alloc faild with no memory\n");
+    			return rtv;
+    		}
+    		mbim->state = 2;
+    	}
+        
+    }
+
+#endif
+
+	if (netif_carrier_ok(dev->net))
+		eth_start(dev, GFP_KERNEL);
+
+	spin_lock_irq(&dev->lock);
+	link = dev->port_usb;
+	if (link && link->open)
+		link->open(link);
+	spin_unlock_irq(&dev->lock);
+
+	return 0;
+}
+
+static int eth_stop(struct net_device *net)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+	unsigned long	flags;
+
+	USBSTACK_DBG("%s", __func__);
+	//VDBG(dev, "%s\n", __func__);
+	netif_stop_queue(net);
+
+	USBSTACK_DBG("stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
+		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
+		dev->net->stats.rx_errors, dev->net->stats.tx_errors
+		);
+
+	/* ensure there are no more active requests */
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb) {
+		struct gether	*link = dev->port_usb;
+		const struct usb_endpoint_descriptor *in;
+		const struct usb_endpoint_descriptor *out;
+
+		if (link->close)
+			link->close(link);
+
+		/* NOTE:  we have no abort-queue primitive we could use
+		 * to cancel all pending I/O.  Instead, we disable then
+		 * reenable the endpoints ... this idiom may leave toggle
+		 * wrong, but that's a self-correcting error.
+		 *
+		 * REVISIT:  we *COULD* just let the transfers complete at
+		 * their own pace; the network stack can handle old packets.
+		 * For the moment we leave this here, since it works.
+		 */
+		 
+		//wangchao edit for ep-dma-free warning...
+#if 0		
+		in = link->in_ep->desc;
+		out = link->out_ep->desc;
+		usb_ep_disable(link->in_ep);
+		usb_ep_disable(link->out_ep);
+		if (netif_carrier_ok(net)) {
+			DBG(dev, "host still using in/out endpoints\n");
+			link->in_ep->desc = in;
+			link->out_ep->desc = out;
+			usb_ep_enable(link->in_ep);
+			usb_ep_enable(link->out_ep);
+		}
+#endif
+//when stop, data in vincPkt_list need release
+	u_ether_tx_vnic_packet_list();
+
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+/*----------mac addr---------------------------------------*/
+
+static u8 eth_addrs[ETH_MAX_NUM][ETH_ALEN] = {
+	{0x34,0x4b,0x50,0x00,0x00,0x00},
+	{0x34,0x4b,0x50,0x00,0x00,0x0a},
+	{0x34,0x4b,0x50,0x00,0x00,0x0b},
+	{0x34,0x4b,0x50,0x00,0x00,0x0c}};
+#define NVUSB_MAC_LEN 6
+
+
+/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
+static char *dev_addr;
+module_param(dev_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
+
+/* this address is invisible to ifconfig */
+static char *host_addr;
+module_param(host_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
+
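+/*
+ * Parse a MAC address string such as "34:4b:50:00:00:01" (':' or '.'
+ * separated).  Falls back to a random address, returning 1, when the string
+ * is missing or invalid.
+ */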
+static int get_ether_addr(const char *str, u8 *dev_addr)
+{
+	if (str) {
+		unsigned	i;
+
+		for (i = 0; i < 6; i++) {
+			unsigned char num;
+
+			if ((*str == '.') || (*str == ':'))
+				str++;
+			num = hex_to_bin(*str++) << 4;
+			num |= hex_to_bin(*str++);
+			dev_addr [i] = num;
+		}
+		if (is_valid_ether_addr(dev_addr))
+			return 0;
+	}
+	random_ether_addr(dev_addr);
+	return 1;
+}
+
+/* currently unused: eth_netdev_ops wires .ndo_set_mac_address to eth_mac_addr */
+static int usb_set_mac_address(struct net_device *dev, void *p)
+{
+	return eth_mac_addr(dev, p);
+}
+
+static struct eth_dev *the_dev;
+static struct eth_devs n_eth_dev = {0};
+
+static const struct net_device_ops eth_netdev_ops = {
+	.ndo_open		= eth_open,
+	.ndo_stop		= eth_stop,
+	.ndo_start_xmit		= eth_start_xmit,
+	.ndo_change_mtu		= ueth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+static ssize_t show_gether_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct eth_dev	*pDev = netdev_priv(to_net_dev(dev));
+	return sprintf(buf, "%d\n", pDev->eth_state);
+}
+
+static DEVICE_ATTR(gether_state, S_IRUGO, show_gether_state, NULL);
+
+static struct attribute *gether_dev_attrs[] = {
+	&dev_attr_gether_state.attr,
+	NULL,
+};
+
+static struct attribute_group gether_dev_attr_grp = {
+	.attrs = gether_dev_attrs,
+};
+
+static const struct attribute_group *gether_dev_groups[] = {
+	&gether_dev_attr_grp,
+	NULL
+};
+
+static struct device_type gadget_type = {
+	.name	= "gadget",
+	.groups	= &gether_dev_groups,
+};
+
+/**
+ * gether_setup - initialize one ethernet-over-usb link
+ * @g: gadget to associated with these links
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ *	host side of the link is recorded
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework.  The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
+{
+	return gether_setup_name(g, ethaddr, "usb");
+}
+
+/**
+ * gether_setup_name - initialize one ethernet-over-usb link
+ * @g: gadget to associated with these links
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ *	host side of the link is recorded
+ * @netname: name for network device (for example, "usb")
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework.  The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+		const char *netname)
+{
+	int status;
+	status = gether_setup_name_num(g, ethaddr, netname, 0);
+	if (!status) {
+		the_dev = n_eth_dev.eth_dev[0];
+	}
+
+	return status;
+}
+
+/**
+ * gether_cleanup - remove Ethernet-over-USB device
+ * Context: may sleep
+ *
+ * This is called to free all resources allocated by @gether_setup().
+ */
+void gether_cleanup(void)
+{
+	if (!the_dev)
+		return;
+
+	gether_cleanup_num(0);
+	the_dev = NULL;
+}
+
+/**
+ * gether_connect - notify network layer that USB link is active
+ * @link: the USB link, set up with endpoints, descriptors matching
+ *	current device speed, and any framing wrapper(s) set up.
+ * Context: irqs blocked
+ *
+ * This is called to activate endpoints and let the network layer know
+ * the connection is active ("carrier detect").  It may cause the I/O
+ * queues to open and start letting network packets flow, but will in
+ * any case activate the endpoints so that they respond properly to the
+ * USB host.
+ *
+ * Verify net_device pointer returned using IS_ERR().  If it doesn't
+ * indicate some error code (negative errno), ep->driver_data values
+ * have been overwritten.
+ */
+struct net_device *gether_connect(struct gether *link)
+{
+	return gether_connect_num(link, 0);
+}
+
+/**
+ * gether_disconnect - notify network layer that USB link is inactive
+ * @link: the USB link, on which gether_connect() was called
+ * Context: irqs blocked
+ *
+ * This is called to deactivate endpoints and let the network layer know
+ * the connection went inactive ("no carrier").
+ *
+ * On return, the state is as if gether_connect() had never been called.
+ * The endpoints are inactive, and accordingly without active USB I/O.
+ * Pointers to endpoint descriptors and endpoint private data are nulled.
+ */
+void gether_disconnect(struct gether *link)
+{
+	struct eth_dev		*dev = link->ioport;
+	struct usb_request	*req;
+	unsigned long flags;
+
+	WARN_ON(!dev);
+	if (!dev)
+		return;
+	USBSTACK_DBG("%s", __func__);
+	DBG(dev, "%s\n", __func__);
+
+	//wangchao add to inform  USBnetcard down
+	//dev->eth_state = 0;
+	//schedule_work(&dev->inform_work);
+	
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+	//wangchao add to inform  USBnetcard down
+	if(strcmp(link->func.name,"mbim"))
+	{
+		printk("gether_disconnect , eth_state set 0\n");
+        dev->eth_state = 0;
+        schedule_work(&dev->inform_work);
+       // return ;
+    }	
+	//if(strstr(dev->net->name, "rndis"))
+	{
+		multi_packet_deactivate();
+	}
+	/* disable endpoints, forcing (synchronous) completion
+	 * of all pending i/o.  then free the request objects
+	 * and forget about the endpoints.
+	 */
+	if(usb_ep_disable(link->in_ep)!=0)
+	{
+        printk("###################[%s], line:%u \n", __func__, __LINE__);
+    }
+	u_ether_tx_vnic_packet_list();
+	spin_lock_irqsave(&dev->req_lock, flags);
+	while (!list_empty(&dev->tx_reqs)) {
+		req = container_of(dev->tx_reqs.next,
+					struct usb_request, list);
+		list_del(&req->list);
+
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+		usb_ep_free_request(link->in_ep, req);
+		spin_lock_irqsave(&dev->req_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+	link->in_ep->driver_data = NULL;
+	link->in_ep->desc = NULL;
+
+	if(usb_ep_disable(link->out_ep) != 0)
+	{
+        printk("\n###################[%s], line:%u \n", __func__, __LINE__);
+    }
+	u_ether_rx_vnic_packet_list();
+	spin_lock_irqsave(&dev->req_lock, flags);
+	while (!list_empty(&dev->rx_reqs)) {
+		req = container_of(dev->rx_reqs.next,
+					struct usb_request, list);
+		list_del(&req->list);
+
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+		usb_ep_free_request(link->out_ep, req);
+		spin_lock_irqsave(&dev->req_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+	link->out_ep->driver_data = NULL;
+	link->out_ep->desc = NULL;
+
+	/* finish forgetting about this USB link episode */
+	dev->header_len = 0;
+	dev->unwrap = NULL;
+	dev->wrap = NULL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+#ifdef CONFIG_PM	
+	atomic_set(&dev->port_usb->wake_state, 0);	
+#endif
+	dev->port_usb = NULL;
+	link->ioport = NULL;
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* ------------------------ multiple ethernet interfaces ------------------------ */
+int gether_setup_name_num(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+		const char *netname, u8 num)
+{
+	struct eth_dev		*dev;
+	struct net_device	*net;
+	int			status;
+	int                  ret;
+	int           is_valid = 0;
+	int           i;
+
+	if (num >= ETH_MAX_NUM)
+		return -ENOMEM;
+
+	if (n_eth_dev.eth_dev[num])
+		return -EBUSY;
+
+	/* check the MAC argument before allocating, so nothing can leak */
+	if (!ethaddr)
+		return -ENXIO;
+
+	net = alloc_etherdev(sizeof *dev);
+	if (!net)
+		return -ENOMEM;
+
+	dev = netdev_priv(net);
+	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->req_lock);
+	INIT_WORK(&dev->work, eth_work);
+	INIT_WORK(&dev->inform_work, eth_inform_work);
+	INIT_LIST_HEAD(&dev->tx_reqs);
+	INIT_LIST_HEAD(&dev->rx_reqs);
+    atomic_set(&dev->work_mode,0);
+
+	skb_queue_head_init(&dev->rx_frames);
+
+	/* network device setup */
+	dev->net = net;
+	dev->eth_state = 0;
+	dev->work_state = 0;
+    dev->net_func_reset = 0 ;
+	snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+#if 1
+	if (is_valid_ether_addr(ethaddr)) {
+		usb_printk("mac address is valid\n");
+		is_valid = 1;
+		for (i = 0; i < NVUSB_MAC_LEN; i++)
+			net->dev_addr[i] = ethaddr[i];
+		/* derive the device MAC from the host MAC */
+		net->dev_addr[NVUSB_MAC_LEN - 1] = ethaddr[NVUSB_MAC_LEN - 1] - 16;
+		memcpy(dev->host_mac, ethaddr, ETH_ALEN);
+	}
+
+	if (is_valid == 0) {
+		random_ether_addr(net->dev_addr);
+		usb_printk("random_ether_addr ok\n");
+		host_addr = &eth_addrs[num][0];
+		memcpy(dev->host_mac, host_addr, ETH_ALEN);
+
+		if (ethaddr)
+			memcpy(ethaddr, dev->host_mac, ETH_ALEN);
+	}
+
+  #else
+  if (get_ether_addr(dev_addr, net->dev_addr))
+		dev_warn(&g->dev,
+			"using random %s ethernet address\n", "self");
+  #endif
+
+	//host_addr = &eth_addrs[num][0];
+	//memcpy(dev->host_mac, host_addr, ETH_ALEN);
+	
+#if 0	
+	if (get_ether_addr(host_addr, dev->host_mac))
+		dev_warn(&g->dev,
+			"using random %s ethernet address\n", "host");
+#endif
+
+	//if (ethaddr)
+		//memcpy(ethaddr, dev->host_mac, ETH_ALEN);
+
+	net->netdev_ops = &eth_netdev_ops;
+	net->flags |= IFF_NOARP;
+	SET_ETHTOOL_OPS(net, &ops);
+
+	dev->gadget = g;
+	SET_NETDEV_DEV(net, &g->dev);
+	SET_NETDEV_DEVTYPE(net, &gadget_type);
+
+	status = register_netdev(net);
+	if (status < 0) {
+		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+		free_netdev(net);
+	} else {
+		INFO(dev, "MAC %pM\n", net->dev_addr);
+		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+		n_eth_dev.eth_dev[num] = dev;
+
+		/* two kinds of host-initiated state changes:
+		 *  - iff DATA transfer is active, carrier is "on"
+		 *  - tx queueing enabled if open *and* carrier is "on"
+		 */
+		netif_carrier_off(net);
+	}
+
+	return status;
+}
+
+
+void gether_cleanup_num(u8 num)
+{
+	struct eth_dev *eth_dev = n_eth_dev.eth_dev[num];
+	
+	if (!eth_dev)
+		return;
+	
+	unregister_netdev(eth_dev->net);
+	flush_work_sync(&eth_dev->work);
+	flush_work_sync(&eth_dev->inform_work);
+	free_netdev(eth_dev->net);
+
+	n_eth_dev.eth_dev[num] = NULL;
+}
+
+struct net_device *gether_connect_num(struct gether *link, u8 num)
+{
+	struct eth_dev		*dev = n_eth_dev.eth_dev[num];
+	int			result = 0;
+	unsigned long flags;
+	printk("%s: connecting dev %u\n", __func__, num);
+
+	USBSTACK_DBG("%s: %d", __func__, num);
+	if (!dev){
+		printk("gether_connect_num dev is null\n");
+		return ERR_PTR(-EINVAL);
+	}
+#ifdef CONFIG_PM
+    if (link->suspend_state == 1)
+		link->suspend_state = 0;
+#endif	
+	link->in_ep->driver_data = dev;
+	result = usb_ep_enable(link->in_ep);
+	if (result != 0) {
+		printk("enable %s --> %d\n", link->in_ep->name, result);
+		DBG(dev, "enable %s --> %d\n", link->in_ep->name, result);
+		goto fail0;
+	}
+
+	link->out_ep->driver_data = dev;
+	result = usb_ep_enable(link->out_ep);
+	if (result != 0) {
+		printk("enable %s --> %d\n", link->out_ep->name, result);
+		DBG(dev, "enable %s --> %d\n",
+			link->out_ep->name, result);
+		goto fail1;
+	}
+
+	if (result == 0) {
+		result = alloc_requests(dev, link, qlen(dev->gadget));
+		USBSTACK_DBG("alloc_requests result: %d", result);
+	}
+	
+	if (result == 0) {
+		dev->zlp = link->is_zlp_ok;
+		USBSTACK_DBG("qlen %d\n", qlen(dev->gadget));
+
+		dev->header_len = link->header_len;
+		dev->unwrap = link->unwrap;
+		dev->wrap = link->wrap;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		dev->port_usb = link;
+		link->ioport = dev;
+		if (netif_running(dev->net)) {
+			if (link->open)
+				link->open(link);
+		} else {
+			if (link->close)
+				link->close(link);
+		}
+		spin_unlock_irqrestore(&dev->lock, flags);
+		
+		netif_carrier_on(dev->net);
+		if (netif_running(dev->net))
+			eth_start(dev, GFP_ATOMIC);
+		
+		/* disabled: inform userspace that the USB network interface is up
+		 * dev->eth_state = 1;
+		 * schedule_work(&dev->inform_work);
+		 */
+	/* on error, disable any endpoints  */
+	} else {
+		(void) usb_ep_disable(link->out_ep);
+fail1:
+		(void) usb_ep_disable(link->in_ep);
+	}
+fail0:
+	/* caller is responsible for cleanup on error */
+	if (result < 0){
+		printk("gether_connect_num fail, ret:%d\n", result);
+		return ERR_PTR(result);
+	}
+	return dev->net;
+}
+
+void gether_uevent_eth_rndis(struct gether *link)
+{
+	struct eth_dev		*dev = link->ioport;
+
+	if (dev && dev->eth_state == 1)
+		gether_uevent_eth_switch(dev->net, 0);
+}
+
+void gether_uevent_eth_ecm(struct gether *link, int state)
+{
+	struct eth_dev *dev = n_eth_dev.eth_dev[0];
+
+	if (state == 0) {
+		if (dev && dev->eth_state == 1)
+			gether_ecm_uevent(0, 0);
+	} else {
+		if (dev && dev->eth_state == 0)
+			gether_ecm_uevent(0, 1);
+	}
+}
+
+#define RNDIS_NUM 4
+extern unsigned int usblan[RNDIS_NUM];
+void gether_uevent_eth_switch(struct net_device *net, int eth_switch)
+{
+	struct eth_dev *dev;
+	dev = netdev_priv(net);
+	dev->eth_state = eth_switch;
+	usblan[0] = eth_switch;
+	schedule_work(&dev->inform_work);
+    return;
+}
+
+void gether_ecm_uevent(u8 num, int ecm_switch)
+{
+	int i = 0;
+	int dev_num = num;
+	struct eth_dev *dev;
+
+	/* num == 0 means "all interfaces" */
+	if (dev_num == 0)
+		dev_num = ETH_MAX_NUM;
+
+	for (i = 0; i < dev_num; i++) {
+		dev = n_eth_dev.eth_dev[i];
+		if (dev) {
+			dev->eth_state = ecm_switch;
+			usblan[i] = ecm_switch;
+			schedule_work(&dev->inform_work);
+		}
+	}
+
+	return;
+}
+
+extern void mbim_clean_ntb_param_flag(void);
+
+void gether_mbim_uevent(int ecm_switch)
+{
+	struct eth_dev *dev;
+
+	dev = n_eth_dev.eth_dev[0];
+	if (dev == NULL || ecm_switch < 0 || ecm_switch > 2) {
+		printk("%s: invalid parameter, ecm_switch %d\n", __func__, ecm_switch);
+		return;
+	}
+
+	switch (ecm_switch) {
+	case 2:		/* reset operation */
+		dev->net_func_reset = 1;
+		schedule_work(&dev->inform_work);
+		mbim_clean_ntb_param_flag();
+		break;
+	case 1:
+	case 0:
+		dev->eth_state = ecm_switch;
+		usblan[0] = ecm_switch;
+		schedule_work(&dev->inform_work);
+		break;
+	}
+	return;
+}
+
+
+/* ======================= MBIM loopback test (simulation) ======================= */
+
+static void mbim_loop_test_tx_callback(struct usb_ep *ep, struct usb_request *req)
+{
+	struct eth_dev	*dev = ep->driver_data;
+	struct f_mbim	*mbim;
+	unsigned long	flags;
+	struct sk_buff	*skb = req->context;
+
+	if (strstr(dev->port_usb->func.name, "mbim") == NULL) {
+		printk("[%s]: net type invalid\n", __func__);
+		return;
+	}
+	mbim = func_to_mbim(&dev->port_usb->func);
+
+	switch (req->status) {
+	default:
+		VDBG(dev, "tx err %d\n", req->status);
+		/* FALLTHROUGH */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		break;
+	case 0:
+		break ;
+	}
+	
+    //atomic_dec(&mbim->data_tx_cnt);
+	spin_lock_irqsave(&dev->req_lock, flags);
+	list_add_tail(&req->list, &dev->tx_reqs);
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+	dev_kfree_skb_any(skb);
+	atomic_dec(&dev->tx_qlen);
+	
+    wake_up(&mbim->data_write_wq) ;
+    
+}
+
+static void mbim_loop_test_rx_callback(struct usb_ep *ep, struct usb_request *req)
+{
+	struct eth_dev	*dev = ep->driver_data;
+	struct f_mbim	*mbim;
+	struct sk_buff	*skb = req->context, *skb2 = NULL;
+	int		status = req->status;
+	unsigned long	flags;
+	struct mbim_pool_data_s *data_channel_p = NULL;
+
+	if (strstr(dev->port_usb->func.name, "mbim") == NULL) {
+		printk("[%s]: net type invalid\n", __func__);
+		return;
+	}
+	mbim = func_to_mbim(&dev->port_usb->func);
+
+	/* take a slot from the data idle list, attach the received data,
+	 * move it onto the rx list, then notify the reader
+	 */
+	if((atomic_read(&mbim->lb_flag) == 0) || (atomic_read(&dev->work_mode) == 0)){
+		//loopback test is end
+		printk("[%s]: loopback test end \n",__func__) ;
+		goto quiesce;
+	}
+    switch (status) 
+    {
+    /* normal completion */
+    case 0:
+        skb_put(skb, req->actual + sizeof(struct ethhdr));
+		//skb_pull(skb,sizeof(struct ethhdr));
+        spin_lock_irqsave(&mbim->data_lock,flags) ;
+        //if(atomic_read(&mbim->data_idle_cnt)== 0)
+        if(list_empty(&mbim->data_ilde_list))
+        {
+            printk("[%s]:rx queue full..... \n",__func__) ;
+			if(list_empty(&mbim->data_rx_list))
+        	{
+				 spin_unlock_irqrestore(&mbim->data_lock,flags) ; 
+				 WARN_ON(1) ;
+				 goto quiesce ;
+			}
+            data_channel_p = list_first_entry(&mbim->data_rx_list, struct mbim_pool_data_s, list) ;
+            list_del_init(&data_channel_p->list) ;
+            atomic_dec(&mbim->data_rx_cnt) ;
+     
+            skb2 = ( struct sk_buff  *) (data_channel_p->pdata);
+        }
+        else
+        {
+            data_channel_p = list_first_entry(&mbim->data_ilde_list, struct mbim_pool_data_s, list) ;
+            atomic_dec(&mbim->data_idle_cnt) ;
+            list_del_init(&data_channel_p->list) ;
+        }
+		/* insert the new skb */
+        data_channel_p->pdata = skb ;
+        list_add_tail(&data_channel_p->list, &mbim->data_rx_list) ;
+		
+		atomic_inc(&mbim->data_rx_cnt) ;
+        //atomic_inc(mbim->data_rx_cnt) ;
+        spin_unlock_irqrestore(&mbim->data_lock,flags) ; 
+		/* wake up the loopback reader */
+		wake_up(&mbim->lp_wait);
+		if(skb2 != NULL)
+		{
+			dev_kfree_skb_any(skb2) ;
+		}
+
+        break;
+        
+    case -ECONNRESET:       /* unlink */
+    case -ESHUTDOWN:        /* disconnect etc */
+        VDBG(dev, "rx shutdown, code %d\n", status);
+        goto quiesce;
+
+quiesce:
+        dev_kfree_skb_any(skb);
+        goto clean;
+        
+    case -EOVERFLOW:
+    default:
+        DBG(dev, "rx status %d\n", status);
+        break;
+    }
+
+    if ((atomic_read(&dev->work_mode) == 0)) 
+    {
+clean:
+        spin_lock_irqsave(&dev->req_lock, flags);
+        list_add_tail(&req->list, &dev->rx_reqs);
+        spin_unlock_irqrestore(&dev->req_lock, flags);
+        req = NULL;
+    }
+    if (req)
+    {
+        rx_submit(dev, req, GFP_ATOMIC);
+    }
+
+}
+
+int mbim_loop_test_xmit(struct eth_dev *dev, struct sk_buff *skb)
+{
+	/* get a free request, configure it, and put it on the transmit queue */
+	int         length = 0;
+    int         retval;
+    struct usb_request  *req = NULL;
+    unsigned long       flags;
+    struct usb_ep       *in;
+    u16         cdc_filter;
+
+    if(dev == NULL || skb == NULL)
+    {
+        printk("%s : param invalid \n",__func__) ;
+        return -NET_XMIT_DROP ;
+    }
+	length = skb->len;
+    spin_lock_irqsave(&dev->lock, flags);
+    if (dev->port_usb) {
+        in = dev->port_usb->in_ep;
+        cdc_filter = dev->port_usb->cdc_filter;
+    } else {
+        in = NULL;
+        cdc_filter = 0;
+    }
+    spin_unlock_irqrestore(&dev->lock, flags);
+    if (!in) {
+        dev_kfree_skb_any(skb);
+        return -NET_XMIT_DROP;
+    }
+    spin_lock_irqsave(&dev->req_lock, flags);
+    /*
+     * this freelist can be empty if an interrupt triggered disconnect()
+     * and reconfigured the gadget (shutting down this queue) after the
+     * network stack decided to xmit but before we got the spinlock.
+     */
+ #if 0    
+     if (dev->eth_state == 0) {
+                spin_unlock_irqrestore(&dev->req_lock, flags);
+                dev_kfree_skb_any(skb);
+                return -NET_XMIT_DROP;
+    }
+#endif            
+    if (list_empty(&dev->tx_reqs) || (dev->port_usb == NULL)
+#ifdef CONFIG_PM
+        ||(dev->port_usb != NULL && dev->port_usb->suspend_state == 1)
+#endif
+        ) {
+        spin_unlock_irqrestore(&dev->req_lock, flags);
+        dev_kfree_skb_any(skb);
+        return -NET_XMIT_DROP;
+    }
+
+    req = container_of(dev->tx_reqs.next, struct usb_request, list);
+    list_del(&req->list);
+
+    /* temporarily stop TX queue when the freelist empties */
+    
+    spin_unlock_irqrestore(&dev->req_lock, flags);
+
+    /* no buffer copies needed, unless the network stack did it
+     * or the hardware can't use skb buffers.
+     * or there's not enough space for extra headers we need
+     */
+
+
+	skb = flush_skbuf(skb);
+	if (skb == NULL) {
+		/* put the request back so it is not leaked */
+		spin_lock_irqsave(&dev->req_lock, flags);
+		list_add_tail(&req->list, &dev->tx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+		return -NET_XMIT_DROP;
+	}
+	
+    req->buf = skb->data;
+    req->dma = virtaddr_to_phys(skb->data);
+    req->context = skb;
+    req->complete = mbim_loop_test_tx_callback;
+
+
+	if (dev->port_usb == NULL) {
+		/* the link went away; free the request properly (it was
+		 * allocated with usb_ep_alloc_request, so kfree is wrong here)
+		 */
+		usb_ep_free_request(in, req);
+		dev_kfree_skb_any(skb);
+		return -NET_XMIT_DROP;
+	}
+    /* NCM requires no zlp if transfer is dwNtbInMaxSize */
+    if (dev->port_usb->is_fixed &&
+        length == dev->port_usb->fixed_in_len &&
+        (length % in->maxpacket) == 0)
+        req->zero = 0;
+    else
+        req->zero = 1;
+
+    /* use zlp framing on tx for strict CDC-Ether conformance,
+     * though any robust network rx path ignores extra padding.
+     * and some hardware doesn't like to write zlps.
+     */
+    if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
+        length++;
+
+    req->length = length;
+
+    /* throttle high/super speed IRQ rate back slightly */
+    if (gadget_is_dualspeed(dev->gadget))
+        req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
+                     dev->gadget->speed == USB_SPEED_SUPER)
+            ? ((atomic_read(&dev->tx_qlen) % qmult) != 0) : 0;
+
+	//printk("-----------mbim_loop_test_xmit, now multi_packet_tx_queue\r\n");
+    retval = multi_packet_tx_queue(in, req, GFP_ATOMIC);
+
+    switch (retval) 
+    {
+    default:
+        DBG(dev, "tx queue err %d\n", retval);
+        break;
+    case 0:
+        atomic_inc(&dev->tx_qlen);
+    }
+
+	if (retval) {
+		dev_kfree_skb_any(skb);
+		/* return the unused request to the free list */
+		spin_lock_irqsave(&dev->req_lock, flags);
+		list_add_tail(&req->list, &dev->tx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	return 0;
+
+}
+
+
+
+int mbim_switch_network_mode(struct gether *link, int type)
+{
+	struct eth_dev *dev;
+
+	if (link == NULL || link->ioport == NULL || link->ioport->net == NULL) {
+		printk("[%s]: invalid parameter\n", __func__);
+		return -ENODEV;
+	}
+	dev = link->ioport;
+
+	if (type == 0) {
+		printk("[%s]: switching to normal mode\n", __func__);
+		atomic_set(&dev->work_mode, 0);
+		mbim_change_rx_complete(rx_complete);
+	} else {
+		printk("[%s]: switching to loopback test mode\n", __func__);
+		atomic_set(&dev->work_mode, 1);
+		mbim_change_rx_complete(test_rx_complete);
+	}
+	return 0;
+}
+
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_ether.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_ether.h
new file mode 100755
index 0000000..3f2dbd2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_ether.h
@@ -0,0 +1,184 @@
+/*
+ * u_ether.h -- interface to USB gadget "ethernet link" utilities
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __U_ETHER_H
+#define __U_ETHER_H
+
+#include <linux/err.h>
+#include <linux/if_ether.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+
+#include "gadget_chips.h"
+
+
+#define USB_ETHER_REPROT_MAX_MODEM_SPEED
+#define LTE_CAT4_SPEED 150000000            /* 150M bps*/
+#define LTE_CAT6_SPEED 300000000            /* 300M bps*/
+
+/*
+ * This represents the USB side of an "ethernet" link, managed by a USB
+ * function which provides control and (maybe) framing.  Two functions
+ * in different configurations could share the same ethernet link/netdev,
+ * using different host interaction models.
+ *
+ * There is a current limitation that only one instance of this link may
+ * be present in any given configuration.  When that's a problem, network
+ * layer facilities can be used to package multiple logical links on this
+ * single "physical" one.
+ */
+struct gether {
+	struct usb_function		func;
+
+	/* updated by gether_{connect,disconnect} */
+	struct eth_dev			*ioport;
+
+	/* endpoints handle full and/or high speeds */
+	struct usb_ep			*in_ep;
+	struct usb_ep			*out_ep;
+
+	bool				is_zlp_ok;
+
+	u16				cdc_filter;
+
+	/* hooks for added framing, as needed for RNDIS and EEM. */
+	u32				header_len;
+	/* NCM requires fixed size bundles */
+	bool				is_fixed;
+	u32				fixed_out_len;
+	u32				fixed_in_len;
+	struct sk_buff			*(*wrap)(struct gether *port,
+						struct sk_buff *skb);
+	int				(*unwrap)(struct gether *port,
+						struct sk_buff *skb,
+						struct sk_buff_head *list);
+
+	/* called on network open/close */
+	void				(*open)(struct gether *);
+	void				(*close)(struct gether *);
+#ifdef CONFIG_PM
+	u32              suspend_state;
+	atomic_t         wake_state;
+#endif
+};
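+
+/*
+ * The wrap()/unwrap() hooks above let a function driver add and strip its
+ * own framing around each Ethernet frame.  A minimal wrap() sketch is shown
+ * below, assuming a hypothetical 4-byte little-endian length header (the
+ * matching struct gether would set .header_len = 4); real protocols such as
+ * RNDIS, EEM and NCM/MBIM use considerably richer framing.
+ *
+ *	static struct sk_buff *foo_wrap(struct gether *port, struct sk_buff *skb)
+ *	{
+ *		unsigned int len = skb->len;
+ *		struct sk_buff *skb2;
+ *
+ *		skb2 = skb_copy_expand(skb, 4, 0, GFP_ATOMIC);
+ *		dev_kfree_skb_any(skb);
+ *		if (!skb2)
+ *			return NULL;
+ *		put_unaligned_le32(len, skb_push(skb2, 4));
+ *		return skb2;
+ *	}
+ */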
+
+struct eth_dev {
+	/* lock is held while accessing port_usb
+	 * or updating its backlink port_usb->ioport
+	 */
+	spinlock_t		lock;
+	struct gether		*port_usb;
+
+	struct net_device	*net;
+	struct usb_gadget	*gadget;
+
+	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
+	struct list_head	tx_reqs, rx_reqs;
+	atomic_t		tx_qlen;
+
+	struct sk_buff_head	rx_frames;
+
+	unsigned		header_len;
+	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
+	int			(*unwrap)(struct gether *,
+						struct sk_buff *skb,
+						struct sk_buff_head *list);
+
+	struct work_struct	work;
+
+	struct work_struct	inform_work;	/* report USB network interface state (online/offline) */
+	int			eth_state;	/* 0: offline, 1: online */
+	int			work_state;
+	atomic_t		work_mode;	/* 0: network mode, 1: loopback test mode */
+	u8			net_func_reset;
+	unsigned long		todo;
+#define	WORK_RX_MEMORY		0
+
+	bool			zlp;
+	u8			host_mac[ETH_ALEN];
+};
+
+#define	DEFAULT_FILTER	(USB_CDC_PACKET_TYPE_BROADCAST \
+			|USB_CDC_PACKET_TYPE_ALL_MULTICAST \
+			|USB_CDC_PACKET_TYPE_PROMISCUOUS \
+			|USB_CDC_PACKET_TYPE_DIRECTED)
+
+
+/* netdev setup/teardown as directed by the gadget driver */
+int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN]);
+void gether_cleanup(void);
+void gether_cleanup_num(u8 num);
+
+/* variant of gether_setup that allows customizing network device name */
+int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+		const char *netname);
+int gether_setup_name_num(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+		const char *netname, u8 num);
+
+/* connect/disconnect is handled by individual functions */
+struct net_device *gether_connect(struct gether *);
+struct net_device *gether_connect_num(struct gether *link, u8 num);
+void gether_disconnect(struct gether *);
+
+void gether_uevent_eth_switch(struct net_device *net, int eth_switch);
+void gether_ecm_uevent(u8 num, int ecm_switch);
+void gether_uevent_eth_rndis(struct gether *link);
+void gether_uevent_eth_ecm(struct gether *link,int state);
+unsigned int gether_ether_skb_num(void);
+
+/* Some controllers can't support CDC Ethernet (ECM) ... */
+static inline bool can_support_ecm(struct usb_gadget *gadget)
+{
+	if (!gadget_supports_altsettings(gadget))
+		return false;
+
+	/* Everything else is *presumably* fine ... but this is a bit
+	 * chancy, so be **CERTAIN** there are no hardware issues with
+	 * your controller.  Add it above if it can't handle CDC.
+	 */
+	return true;
+}
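+
+/*
+ * A configuration's bind callback can use can_support_ecm() to fall back to
+ * the simpler CDC Subset function when the controller cannot handle the
+ * altsettings ECM needs.  Sketch only; "c" is the usb_configuration and
+ * "hostaddr" the host MAC buffer:
+ *
+ *	if (can_support_ecm(c->cdev->gadget))
+ *		status = ecm_bind_config(c, hostaddr);
+ *	else
+ *		status = geth_bind_config(c, hostaddr);
+ */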
+
+/* each configuration may bind one instance of an ethernet link */
+int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int ecm_bind_config_num(struct usb_configuration *c, u8 ethaddr[ETH_ALEN], u8 num);
+int ncm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int eem_bind_config(struct usb_configuration *c);
+void rx_fill(struct eth_dev *dev, gfp_t gfp_flags);
+int mbim_loop_test_xmit(struct eth_dev *dev, struct sk_buff *skb);
+
+#ifdef USB_ETH_RNDIS
+
+int rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+				u32 vendorID, const char *manufacturer);
+
+#else
+
+static inline int
+rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+{
+	return 0;
+}
+
+static inline int
+rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+				u32 vendorID, const char *manufacturer)
+{
+	return 0;
+}
+
+#endif
+
+#endif /* __U_ETHER_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_phonet.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_phonet.h
new file mode 100644
index 0000000..09a7525
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_phonet.h
@@ -0,0 +1,21 @@
+/*
+ * u_phonet.h - interface to Phonet
+ *
+ * Copyright (C) 2007-2008 by Nokia Corporation
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ */
+
+#ifndef __U_PHONET_H
+#define __U_PHONET_H
+
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+
+int gphonet_setup(struct usb_gadget *gadget);
+int phonet_bind_config(struct usb_configuration *c);
+void gphonet_cleanup(void);
+
+#endif /* __U_PHONET_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_serial.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_serial.c
new file mode 100644
index 0000000..978a1b0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_serial.c
@@ -0,0 +1,1751 @@
+/*
+ * u_serial.c - utilities for USB gadget "serial port"/TTY support
+ *
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ *
+ * This code also borrows from usbserial.c, which is
+ * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
+ * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include "u_serial.h"
+
+
+/*
+ * This component encapsulates the TTY layer glue needed to provide basic
+ * "serial port" functionality through the USB gadget stack.  Each such
+ * port is exposed through a /dev/ttyGS* node.
+ *
+ * After initialization (gserial_setup), these TTY port devices stay
+ * available until they are removed (gserial_cleanup).  Each one may be
+ * connected to a USB function (gserial_connect), or disconnected (with
+ * gserial_disconnect) when the USB host issues a config change event.
+ * Data can only flow when the port is connected to the host.
+ *
+ * A given TTY port can be made available in multiple configurations.
+ * For example, each one might expose a ttyGS0 node which provides a
+ * login application.  In one case that might use CDC ACM interface 0,
+ * while another configuration might use interface 3 for that.  The
+ * work to handle that (including descriptor management) is not part
+ * of this component.
+ *
+ * Configurations may expose more than one TTY port.  For example, if
+ * ttyGS0 provides login service, then ttyGS1 might provide dialer access
+ * for a telephone or fax link.  And ttyGS2 might be something that just
+ * needs a simple byte stream interface for some messaging protocol that
+ * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
+ */
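+
+/*
+ * Lifecycle sketch (hypothetical gadget driver; "acm" and "port_num" are
+ * placeholder names for a function that embeds a struct gserial):
+ *
+ *	gserial_setup(cdev->gadget, n_ports);	// at gadget bind time
+ *	...
+ *	gserial_connect(&acm->port, port_num);	// from set_alt(), host selected config
+ *	gserial_disconnect(&acm->port);		// from disable(), config change/unplug
+ *	...
+ *	gserial_cleanup();			// at gadget unbind time
+ */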
+
+#define PREFIX	"ttyGS"
+
+/*
+ * gserial is the lifecycle interface, used by USB functions
+ * gs_port is the I/O nexus, used by the tty driver
+ * tty_struct links to the tty/filesystem framework
+ *
+ * gserial <---> gs_port ... links will be null when the USB link is
+ * inactive; managed by gserial_{connect,disconnect}().  each gserial
+ * instance can wrap its own USB control protocol.
+ *	gserial->ioport == usb_ep->driver_data ... gs_port
+ *	gs_port->port_usb ... gserial
+ *
+ * gs_port <---> tty_struct ... links will be null when the TTY file
+ * isn't opened; managed by gs_open()/gs_close()
+ *	gserial->port_tty ... tty_struct
+ *	tty_struct->driver_data ... gserial
+ */
+
+/* RX and TX queues can buffer RX_QUEUE_SIZE/TX_QUEUE_SIZE packets before
+ * they hit the next layer of buffering.  For TX that's a circular buffer;
+ * for RX consider it a NOP.  A third layer is provided by the TTY code.
+ */
+#define RX_QUEUE_SIZE		1	/* was 32 */
+#define TX_QUEUE_SIZE		2	/* was 32 */
+#define TX_QUEUE_SIZE_LOG	8	/* was 32 */
+/* write buffer reduced from 128 KB (8192*16) to 8 KB */
+#define WRITE_BUF_SIZE		(1024*8)		/* TX only */
+
+#define USB_SERIAL_MAX_TRANS	4096	/* was 8192 */
+
+#ifndef CONFIG_SYSTEM_RECOVERY
+volatile bool g_bUsbDevHotAdd[8] = {false, false, false, false, false, false, false, false}; /* USB supports at most 9 endpoints, one of which is the control endpoint */
+EXPORT_SYMBOL(g_bUsbDevHotAdd);
+#endif
+
+
+/* circular buffer */
+struct gs_buf {
+	unsigned		buf_size;
+	char			*buf_buf;
+	char			*buf_get;
+	char			*buf_put;
+};
+
+/*
+ * The port structure holds info for each port, one for each minor number
+ * (and thus for each /dev/ node).
+ */
+struct gs_port {
+	spinlock_t		port_lock;	/* guard port_* access */
+
+	struct gserial		*port_usb;
+	struct tty_struct	*port_tty;
+
+	unsigned		open_count;
+	bool			openclose;	/* open/close in progress */
+	u8			port_num;
+
+	wait_queue_head_t	close_wait;	/* wait for last close */
+
+	struct list_head	read_pool;
+	int read_started;
+	int read_allocated;
+	int read_complete;	
+	struct list_head	read_queue;
+	unsigned		n_read;
+	struct tasklet_struct	push;
+	struct timer_list rx_timer;
+	atomic_t gs_timer_inited;
+	bool rx_push_run;
+	int flip_tty_flag;
+
+	struct work_struct online_inform_work;	 /* inform USB gserial state( online ) */
+	struct work_struct offline_inform_work;	 /* inform USB gserial state( offline ) */
+	int off_report;
+	struct list_head	write_pool;
+	int write_started;
+	int write_allocated;
+	struct gs_buf		port_write_buf;
+	wait_queue_head_t	drain_wait;	/* wait while writes drain */
+
+	/* REVISIT this state ... */
+	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */
+	int binded;	/* is this port bound to a gserial instance? */
+};
+
+/* increase N_PORTS if you need more */
+#define N_PORTS		4
+static struct portmaster {
+	struct mutex	lock;			/* protect open/close */
+	struct gs_port	*port;
+	struct device	*tty_dev;
+} ports[N_PORTS];
+static unsigned	n_ports;
+static int inform_state = 0;
+
+#define GS_CLOSE_TIMEOUT		15		/* seconds */
+
+
+#ifdef VERBOSE_DEBUG
+#define pr_vdebug(fmt, arg...) \
+	pr_debug(fmt, ##arg)
+#else
+#define pr_vdebug(fmt, arg...) \
+	({ if (0) pr_debug(fmt, ##arg); })
+#endif
+
+static void gserial_inform_online_work(struct work_struct *online_inform_work)
+{
+#if 1
+	struct gs_port	*port = container_of(online_inform_work, struct gs_port, online_inform_work);
+	
+	kobject_uevent(&ports[port->port_num].tty_dev->kobj, KOBJ_ONLINE);
+
+#if 0
+	#ifndef CONFIG_SYSTEM_RECOVERY
+		g_bUsbDevHotAdd[port->port_num] = true;
+	#endif
+#endif
+	usb_printk("%s,ttyGS%d,end\n",__func__,port->port_num);
+#endif
+}
+
+static void gserial_inform_offline_work(struct work_struct *offline_inform_work)
+{
+#if 1
+	struct gs_port	*port = container_of(offline_inform_work, struct gs_port, offline_inform_work);
+	
+	kobject_uevent(&ports[port->port_num].tty_dev->kobj, KOBJ_OFFLINE);
+
+#if 0
+	#ifndef CONFIG_SYSTEM_RECOVERY
+		g_bUsbDevHotAdd[port->port_num] = false;
+	#endif
+#endif
+	usb_printk("%s,ttyGS%d,end\n",__func__,port->port_num);
+#endif
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Circular Buffer */
+
+/*
+ * gs_buf_alloc
+ *
+ * Allocate a circular buffer and all associated memory.
+ */
+static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
+{
+	gb->buf_buf = kmalloc(size, GFP_KERNEL);
+	if (gb->buf_buf == NULL)
+		return -ENOMEM;
+
+	gb->buf_size = size;
+	gb->buf_put = gb->buf_buf;
+	gb->buf_get = gb->buf_buf;
+
+	return 0;
+}
+
+/*
+ * gs_buf_free
+ *
+ * Free the buffer and all associated memory.
+ */
+static void gs_buf_free(struct gs_buf *gb)
+{
+	kfree(gb->buf_buf);
+	gb->buf_buf = NULL;
+}
+
+/*
+ * gs_buf_clear
+ *
+ * Clear out all data in the circular buffer.
+ */
+static void gs_buf_clear(struct gs_buf *gb)
+{
+	gb->buf_get = gb->buf_put;
+	/* equivalent to a get of all data available */
+}
+
+/*
+ * gs_buf_data_avail
+ *
+ * Return the number of bytes of data written into the circular
+ * buffer.
+ */
+static unsigned gs_buf_data_avail(struct gs_buf *gb)
+{
+	return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
+}
+
+/*
+ * gs_buf_space_avail
+ *
+ * Return the number of bytes of space available in the circular
+ * buffer.
+ */
+static unsigned gs_buf_space_avail(struct gs_buf *gb)
+{
+	return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
+}
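+
+/*
+ * Worked example of the arithmetic above: with buf_size = 8, buf_put at
+ * offset 6 and buf_get at offset 2, data_avail = (8 + 6 - 2) % 8 = 4 and
+ * space_avail = (8 + 2 - 6 - 1) % 8 = 3.  One byte is always left unused
+ * so that a completely full buffer (space 0) can be distinguished from an
+ * empty one (data 0).
+ */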
+
+/*
+ * gs_buf_put
+ *
+ * Copy data from a user buffer and put it into the circular buffer.
+ * The data must fit entirely in the space available.
+ *
+ * Return the number of bytes copied, or -ENOMEM if there is not
+ * enough room for all of it.
+ */
+static int
+gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
+{
+	unsigned len;
+
+	len  = gs_buf_space_avail(gb);
+#if 0
+	if (count > len)
+		count = len;
+
+	if (count == 0)
+		//return 0;
+		return -ENOMEM;
+
+	len = gb->buf_buf + gb->buf_size - gb->buf_put;
+	if (count > len) {
+		memcpy(gb->buf_put, buf, len);
+		memcpy(gb->buf_buf, buf+len, count - len);
+		gb->buf_put = gb->buf_buf + count - len;
+	} else {
+		memcpy(gb->buf_put, buf, count);
+		if (count < len)
+			gb->buf_put += count;
+		else /* count == len */
+			gb->buf_put = gb->buf_buf;
+	}
+
+	return count;
+#else
+	if ((len == 0) || (count > len)){
+		return -ENOMEM;
+	}	
+
+	len = gb->buf_buf + gb->buf_size - gb->buf_put;
+	if (count > len) {
+		memcpy(gb->buf_put, buf, len);
+		memcpy(gb->buf_buf, buf+len, count - len);
+		gb->buf_put = gb->buf_buf + count - len;
+	} else {
+		memcpy(gb->buf_put, buf, count);
+		if (count < len)
+			gb->buf_put += count;
+		else /* count == len */
+			gb->buf_put = gb->buf_buf;
+	}	
+
+#endif
+	return count;
+}
+
+/*
+ * gs_buf_get
+ *
+ * Get data from the circular buffer and copy to the given buffer.
+ * Restrict to the amount of data available.
+ *
+ * Return the number of bytes copied.
+ */
+static unsigned
+gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
+{
+	unsigned len;
+
+	len = gs_buf_data_avail(gb);
+	if (count > len)
+		count = len;
+
+	if (count == 0)
+		return 0;
+
+	len = gb->buf_buf + gb->buf_size - gb->buf_get;
+	if (count > len) {
+		memcpy(buf, gb->buf_get, len);
+		memcpy(buf+len, gb->buf_buf, count - len);
+		gb->buf_get = gb->buf_buf + count - len;
+	} else {
+		memcpy(buf, gb->buf_get, count);
+		if (count < len)
+			gb->buf_get += count;
+		else /* count == len */
+			gb->buf_get = gb->buf_buf;
+	}
+
+	return count;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* I/O glue between TTY (upper) and USB function (lower) driver layers */
+
+/*
+ * gs_alloc_req
+ *
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or NULL if there is an error.
+ */
+struct usb_request *
+gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+	if (req != NULL) {
+		req->length = len;
+		req->buf = kmalloc(len, kmalloc_flags);
+		if (req->buf == NULL) {
+			usb_ep_free_request(ep, req);
+			return NULL;
+		}
+	}
+
+	return req;
+}
+
+/*
+ * gs_free_req
+ *
+ * Free a usb_request and its buffer.
+ */
+void gs_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
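+
+/*
+ * gs_alloc_req()/gs_free_req() are also useful to function drivers for
+ * their own endpoints, e.g. an interrupt/notification endpoint (sketch;
+ * "notify_ep" and the 16-byte size are purely illustrative):
+ *
+ *	struct usb_request *req;
+ *
+ *	req = gs_alloc_req(notify_ep, 16, GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	...
+ *	gs_free_req(notify_ep, req);
+ */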
+
+/*
+ * gs_send_packet
+ *
+ * If there is data to send, a packet is built in the given
+ * buffer and the size is returned.  If there is no data to
+ * send, 0 is returned.
+ *
+ * Called with port_lock held.
+ */
+static unsigned
+gs_send_packet(struct gs_port *port, char *packet, unsigned size)
+{
+	unsigned len;
+
+	len = gs_buf_data_avail(&port->port_write_buf);
+#if 0	
+	if (len < size)
+		size = len;
+	if (size != 0)
+		size = gs_buf_get(&port->port_write_buf, packet, size);
+#else
+	if(len == 0){
+		return 0;
+	}
+	size = gs_buf_get(&port->port_write_buf, packet, size);
+#endif
+	return size;
+}
+
+/*
+ * gs_start_tx
+ *
+ * This function finds available write requests, calls
+ * gs_send_packet to fill these packets with data, and
+ * continues until either there are no more write requests
+ * available or no more data to send.  This function is
+ * run whenever data arrives or write requests are available.
+ *
+ * Context: caller owns port_lock; port_usb is non-null.
+ */
+#ifdef CONFIG_PM
+unsigned int g_dbg_userial_times = 0;
+#endif
+static int gs_start_tx(struct gs_port *port)
+/*
+__releases(&port->port_lock)
+__acquires(&port->port_lock)
+*/
+{
+	struct list_head	*pool = &port->write_pool;
+	struct usb_ep		*in = NULL;
+	int			status = 0;
+	bool			do_tty_wake = false;
+
+    if(NULL==port->port_usb)
+		return 0;	
+    in = port->port_usb->in;
+	while (!list_empty(&port->write_pool)) {
+		struct usb_request	*req;
+		int			len;
+
+		pool = &port->write_pool;
+
+		if (port->write_started >= TX_QUEUE_SIZE)
+			break;
+		if((pool->next == NULL) || (pool->next == &port->write_pool)){			
+			break;	
+		}
+
+		//req = list_entry(pool->next, struct usb_request, list);
+		req = container_of(pool->next, struct usb_request, list);
+		//len = gs_send_packet(port, req->buf, in->maxpacket);
+		len = gs_send_packet(port, req->buf, USB_SERIAL_MAX_TRANS);
+		if (len == 0) {
+			wake_up_interruptible(&port->drain_wait);
+			break;
+		}
+		do_tty_wake = true;
+
+		req->length = len;
+		//list_del(&req->list);
+		list_del_init(&req->list);
+		req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);
+
+		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
+				port->port_num, len, *((u8 *)req->buf),
+				*((u8 *)req->buf+1), *((u8 *)req->buf+2));
+
+		/* Drop lock while we call out of driver; completions
+		 * could be issued while we do so.  Disconnection may
+		 * happen too; maybe immediately before we queue this!
+		 *
+		 * NOTE that we may keep sending data for a while after
+		 * the TTY closed (dev->ioport->port_tty is NULL).
+		 */
+#ifdef CONFIG_PM
+		if (port->port_usb && port->port_usb->suspend_state == 1) {
+			g_dbg_userial_times++;
+			if (g_dbg_userial_times == 1 || g_dbg_userial_times % 3000 == 0)
+				usb_printk("%s, %u portname:%s, wrtime:%d\n", __func__, __LINE__, port->port_usb->func.name, g_dbg_userial_times);
+			status = -ESHUTDOWN;
+			printk("port:0x%p, owner:%p\n", port, port->port_lock.lock.owner);
+#if 0
+            if(port->port_usb->func.config && port->port_usb->func.config->cdev && port->port_usb->func.config->cdev->gadget) {
+          		spin_unlock(&port->port_lock);
+			    usb_gadget_wakeup(port->port_usb->func.config->cdev->gadget);
+				do{
+					msleep(2);
+				}while(port->port_usb->suspend_state==1);
+         	   spin_lock(&port->port_lock);
+            }
+		}	
+		//if (port->port_usb&&port->port_usb->suspend_state == 0)
+#else
+		}else
+#endif
+#endif
+		{
+#ifdef CONFIG_PM		
+	     g_dbg_userial_times = 0;
+#endif 
+            spin_unlock(&port->port_lock);
+            status = usb_ep_queue(in, req, GFP_ATOMIC);
+            spin_lock(&port->port_lock);
+        }
+
+		if (status) {
+			pr_debug("%s: %s %s err %d\n",
+					__func__, "queue", in->name, status);
+			list_add(&req->list, &port->write_pool);
+			break;
+		}
+
+		port->write_started++;
+
+		/* abort immediately after disconnect */
+		if (!port->port_usb)
+			break;
+	}
+
+	if (do_tty_wake && port->port_tty)
+		tty_wakeup(port->port_tty);
+	return status;
+}
+
+/*
+ * Context: caller owns port_lock, and port_usb is set
+ */
+static unsigned gs_start_rx(struct gs_port *port)
+/*
+__releases(&port->port_lock)
+__acquires(&port->port_lock)
+*/
+{
+	struct list_head	*pool = &port->read_pool;
+	struct usb_ep		*out = NULL;
+
+    if(NULL == port->port_usb)
+		return 0;
+    out = port->port_usb->out;
+	while (!list_empty(pool)) {
+		struct usb_request	*req;
+		int			status;
+		struct tty_struct	*tty;
+
+		/* no more rx if closed */
+		tty = port->port_tty;
+		if (!tty)
+			break;
+
+		if (port->read_started >= RX_QUEUE_SIZE)
+			break;
+
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+		//req->length = out->maxpacket;
+		req->length = USB_SERIAL_MAX_TRANS;
+
+		/* drop lock while we call out; the controller driver
+		 * may need to call us back (e.g. for disconnect)
+		 */
+		spin_unlock(&port->port_lock);
+		status = usb_ep_queue(out, req, GFP_ATOMIC);
+		spin_lock(&port->port_lock);
+
+		if (status) {
+			pr_debug("%s: %s %s err %d\n",
+					__func__, "queue", out->name, status);
+			list_add(&req->list, pool);
+			break;
+		}
+		port->read_started++;
+
+		/* abort immediately after disconnect */
+		if (!port->port_usb)
+			break;
+	}
+	return port->read_started;
+}
+
+/*
+ * RX tasklet takes data out of the RX queue and hands it up to the TTY
+ * layer until it refuses to take any more data (or is throttled back).
+ * Then it issues reads for any further data.
+ *
+ * If the RX queue becomes full enough that no usb_request is queued,
+ * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
+ * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
+ * can be buffered before the TTY layer's buffers (currently 64 KB).
+ */
+
+int rx_print_cnt = 0;
+static  int get_rx_push_task_state(struct tasklet_struct *t)
+{
+	if(t != NULL){
+		return (t->state & 0x1);
+	}
+		return -1;
+}
+ 
+#define  RX_TIME_OUT  (HZ *1)
+
+void gs_rx_push_timeout(void * data)
+{
+	struct gs_port		*port = (void *)data;
+	struct tty_struct	*tty = NULL; 
+	struct gserial		*gser =NULL;
+	if(port == NULL)
+		return;
+    tty = port->port_tty;
+    gser = port->port_usb;
+	if(tty == NULL)
+		return;	    
+	if(gser == NULL)
+		return;    
+	
+#ifdef CONFIG_PM	
+	if(gser->suspend_state == 1)
+		return;
+#endif
+	
+	spin_lock_irq(&port->port_lock);			
+	if((tty->buf.memory_used >= 64000) &&
+		(port->read_complete == RX_QUEUE_SIZE) &&
+		list_empty(&port->read_pool) && 
+		(port->read_started == RX_QUEUE_SIZE) &&
+		(get_rx_push_task_state(&port->push) == 0) && 
+		(port->rx_push_run == false)
+		){
+			printk("gs_rx_push_timeout: rx push stalled, rescheduling ttyGS%d\n", port->port_num);
+			port->flip_tty_flag = 1;
+			tasklet_schedule(&port->push);
+	} else{
+		if(gser){
+			#ifdef CONFIG_PM
+			if((gser->suspend_state == 0)&&(atomic_read(&port->gs_timer_inited) == 1)) {
+			#else
+			if(atomic_read(&port->gs_timer_inited) == 1) {			
+			#endif	
+				mod_timer(&(port->rx_timer), jiffies + RX_TIME_OUT);
+				//printk("gs_rx_push_timeout mod_timer.\n");
+			}
+		}
+	}
+	spin_unlock_irq(&port->port_lock);		
+}
+
+static void gs_rx_push(unsigned long _port)
+{
+	struct gs_port		*port = (void *)_port;
+	struct tty_struct	*tty;
+	struct list_head	*queue = &port->read_queue;
+	bool			disconnect = false;
+	bool			do_push = false;
+	struct gserial		*gser = port->port_usb;
+	/* hand any queued data to the tty */
+	spin_lock_irq(&port->port_lock);
+	port->rx_push_run = true;
+	if(gser){
+		#ifdef CONFIG_PM		
+		if((gser->suspend_state == 0)&&(atomic_read(&port->gs_timer_inited) == 1)) {
+		#else
+		if(atomic_read(&port->gs_timer_inited) == 1){
+		#endif	
+			mod_timer(&(port->rx_timer), jiffies + RX_TIME_OUT);
+			//printk("gs_rx_push mod_timer.\n");
+		}
+	}
+	
+	tty = port->port_tty;
+	while (!list_empty(queue)) {
+		struct usb_request	*req;
+
+		req = list_first_entry(queue, struct usb_request, list);
+
+		/* discard data if tty was closed */
+		if (!tty)
+			goto recycle;
+
+		/* leave data queued if tty was rx throttled */
+		if (test_bit(TTY_THROTTLED, &tty->flags))
+			break;
+
+		switch (req->status) {
+		case -ESHUTDOWN:
+			disconnect = true;
+			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
+			break;
+
+		default:
+			/* presumably a transient fault */
+			pr_warning(PREFIX "%d: unexpected RX status %d\n",
+					port->port_num, req->status);
+			/* FALLTHROUGH */
+		case 0:
+			/* normal completion */
+			break;
+		}
+
+		/* push data to (open) tty */
+		if (req->actual) {
+			char		*packet = req->buf;
+			unsigned	size = req->actual;
+			unsigned	n;
+			int		count;
+
+			/* we may have pushed part of this packet already... */
+			n = port->n_read;
+			if (n) {
+				packet += n;
+				size -= n;
+			}
+
+			count = tty_insert_flip_string(tty, packet, size);
+			if (count)
+				do_push = true;
+			if (count != size) {
+				/* stop pushing; TTY layer can't handle more */
+				port->n_read += count;
+				//pr_vdebug(PREFIX "%d: rx block %d/%d\n",port->port_num,	count, req->actual);
+				if((count == 0) && (port->flip_tty_flag == 1) && 
+					(port->read_complete == RX_QUEUE_SIZE) && 
+					((tty->buf.memory_used + size) >= 65536)){
+						printk("set do push,buf_use:%d, id:%d\n", tty->buf.memory_used, port->port_num);
+						do_push = true;
+				}
+				break;
+			}
+			port->n_read = 0;
+		}
+recycle:
+		list_move(&req->list, &port->read_pool);
+		port->read_started--;
+		port->read_complete--;
+	}
+
+	/* Push from tty to ldisc; without low_latency set this is handled by
+	 * a workqueue, so we won't get callbacks and can hold port_lock
+	 */
+	if (tty && do_push)
+		tty_flip_buffer_push(tty);
+
+
+	/* We want our data queue to become empty ASAP, keeping data
+	 * in the tty and ldisc (not here).  If we couldn't push any
+	 * this time around, there may be trouble unless there's an
+	 * implicit tty_unthrottle() call on its way...
+	 *
+	 * REVISIT we should probably add a timer to keep the tasklet
+	 * from starving ... but it's not clear that case ever happens.
+	 */
+	if (!list_empty(queue) && tty) {
+		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
+			if (do_push)
+				tasklet_schedule(&port->push);
+#if 0			
+			else
+				pr_warning(PREFIX "%d: RX not scheduled?\n",
+					port->port_num);
+#endif			
+		}
+	}
+
+	if(port->flip_tty_flag == 1){
+		port->flip_tty_flag = 0;
+		tasklet_schedule(&port->push);
+	}
+	/* If we're still connected, refill the USB RX queue. */
+	if (!disconnect && port->port_usb)
+		gs_start_rx(port);
+	port->rx_push_run = false;
+	spin_unlock_irq(&port->port_lock);
+}
+
+static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gs_port	*port = ep->driver_data;
+
+	/* Queue all received data until the tty layer is ready for it. */
+	spin_lock(&port->port_lock);
+	list_add_tail(&req->list, &port->read_queue);
+	port->read_complete++;
+	tasklet_schedule(&port->push);
+	spin_unlock(&port->port_lock);
+}
+
+static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gs_port	*port = ep->driver_data;
+
+	spin_lock(&port->port_lock);
+	//list_add(&req->list, &port->write_pool);
+	list_add_tail(&req->list, &port->write_pool);
+	port->write_started--;
+
+	switch (req->status) {
+	default:
+		/* presumably a transient fault */
+		pr_warning("%s: unexpected %s status %d\n",
+				__func__, ep->name, req->status);
+		/* FALL THROUGH */
+	case 0:
+		/* normal completion */
+		gs_start_tx(port);
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
+		break;
+	}
+
+	spin_unlock(&port->port_lock);
+}
+
+static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
+							 int *allocated)
+{
+	struct usb_request	*req;
+
+	while (!list_empty(head)) {
+		req = list_entry(head->next, struct usb_request, list);
+		list_del(&req->list);
+		gs_free_req(ep, req);
+		if (allocated)
+			(*allocated)--;
+	}
+}
+
+static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
+		void (*fn)(struct usb_ep *, struct usb_request *),
+		int *allocated)
+{
+	int			i;
+	struct usb_request	*req;
+	int n;
+#if 0	
+	int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
+#else
+	if (ep->address & 0x80) {
+		/* IN (tx) endpoint; may need more requests for zcat logging */
+		n = allocated ? TX_QUEUE_SIZE - *allocated : TX_QUEUE_SIZE;
+	} else {
+		n = allocated ? RX_QUEUE_SIZE - *allocated : RX_QUEUE_SIZE;
+	}
+#endif		
+
+	/* Pre-allocate up to the per-direction queue size (RX_QUEUE_SIZE or
+	 * TX_QUEUE_SIZE) transfers, but if we can't do quite that many this
+	 * time, don't fail ... we just won't be as speedy as we might
+	 * otherwise be.
+	 */
+	for (i = 0; i < n; i++) {
+		//req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
+		req = gs_alloc_req(ep, USB_SERIAL_MAX_TRANS, GFP_ATOMIC);
+		if (!req)
+			return list_empty(head) ? -ENOMEM : 0;
+		req->complete = fn;
+		list_add_tail(&req->list, head);
+		if (allocated)
+			(*allocated)++;
+	}
+	return 0;
+}
+
+/**
+ * gs_start_io - start USB I/O streams
+ * @dev: encapsulates endpoints to use
+ * Context: holding port_lock; port_tty and port_usb are non-null
+ *
+ * We only start I/O when something is connected to both sides of
+ * this port.  If nothing is listening on the host side, we may
+ * be pointlessly filling up our TX buffers and FIFO.
+ */
+static int gs_start_io(struct gs_port *port)
+{
+	struct list_head	*head = &port->read_pool;
+	struct usb_ep		*ep = port->port_usb->out;
+	int			status;
+	unsigned		started;
+
+	/* Allocate RX and TX I/O buffers.  We can't easily do this much
+	 * earlier (with GFP_KERNEL) because the requests are coupled to
+	 * endpoints, as are the packet sizes we'll be using.  Different
+	 * configurations may use different endpoints with a given port;
+	 * and high speed vs full speed changes packet sizes too.
+	 */
+	status = gs_alloc_requests(ep, head, gs_read_complete,
+		&port->read_allocated);
+	if (status)
+		return status;
+
+	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
+			gs_write_complete, &port->write_allocated);
+	if (status) {
+		gs_free_requests(ep, head, &port->read_allocated);
+		return status;
+	}
+
+	/* queue read requests */
+	port->n_read = 0;
+	started = gs_start_rx(port);
+
+	/* unblock any pending writes into our circular buffer */
+	if (started) {
+		tty_wakeup(port->port_tty);
+	} else {
+		gs_free_requests(ep, head, &port->read_allocated);
+		gs_free_requests(port->port_usb->in, &port->write_pool,
+			&port->write_allocated);
+		status = -EIO;
+	}
+
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* TTY Driver */
+
+/*
+ * gs_open sets up the link between a gs_port and its associated TTY.
+ * That link is broken *only* by TTY close(), and all driver methods
+ * know that.
+ */
+static int gs_open(struct tty_struct *tty, struct file *file)
+{
+	int		port_num = tty->index;
+	struct gs_port	*port;
+	int		status;
+
+	do {
+		mutex_lock(&ports[port_num].lock);
+		port = ports[port_num].port;
+		if (!port)
+			status = -ENODEV;
+		else {
+			spin_lock_irq(&port->port_lock);
+
+			/* already open?  Great. */
+			if (port->open_count) {
+				status = 0;
+				port->open_count++;
+
+			/* currently opening/closing? wait ... */
+			} else if (port->openclose) {
+				status = -EBUSY;
+
+			/* ... else we do the work */
+			} else {
+				status = -EAGAIN;
+				port->openclose = true;
+			}
+			spin_unlock_irq(&port->port_lock);
+		}
+		mutex_unlock(&ports[port_num].lock);
+		if(port)
+			printk("gs_open, port->open_count:%d\n", port->open_count);
+
+		switch (status) {
+		default:
+			/* fully handled */
+			return status;
+		case -EAGAIN:
+			/* must do the work */
+			break;
+		case -EBUSY:
+			/* wait for EAGAIN task to finish */
+			msleep(1);
+			/* REVISIT could have a waitchannel here, if
+			 * concurrent open performance is important
+			 */
+			break;
+		}
+	} while (status != -EAGAIN);
+
+	/* Do the "real open" */
+	spin_lock_irq(&port->port_lock);
+
+	/* allocate circular buffer on first open */
+	if (port->port_write_buf.buf_buf == NULL) {
+
+		spin_unlock_irq(&port->port_lock);
+		status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
+		spin_lock_irq(&port->port_lock);
+
+		if (status) {
+			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
+				port->port_num, tty, file);
+			port->openclose = false;
+			goto exit_unlock_port;
+		}
+	}
+
+	/* REVISIT if REMOVED (ports[].port NULL), abort the open
+	 * to let rmmod work faster (but this way isn't wrong).
+	 */
+
+	/* REVISIT maybe wait for "carrier detect" */
+
+	tty->driver_data = port;
+	port->port_tty = tty;
+
+	port->open_count = 1;
+	port->openclose = false;
+
+	/* if connected, start the I/O stream */
+	if (port->port_usb) {
+		struct gserial	*gser = port->port_usb;
+
+		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
+		gs_start_io(port);
+
+		if (gser->connect)
+			gser->connect(gser);
+	}
+
+	if(atomic_read(&port->gs_timer_inited) == 0){
+		printk("gs_open init_timer entry.\n");
+		init_timer(&(port->rx_timer));
+		printk("gs_open init_timer exit.\n");
+		port->rx_timer.data = (unsigned long)(port);
+		port->rx_timer.function = (void *)gs_rx_push_timeout;
+		port->rx_timer.expires = jiffies + RX_TIME_OUT;
+		atomic_set(&port->gs_timer_inited,1);
+	}	
+
+	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
+
+	status = 0;
+
+exit_unlock_port:
+	spin_unlock_irq(&port->port_lock);
+	return status;
+}
+
+static int gs_writes_finished(struct gs_port *p)
+{
+	int cond;
+
+	/* return true on disconnect or empty buffer */
+	spin_lock_irq(&p->port_lock);
+	cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
+	spin_unlock_irq(&p->port_lock);
+
+	return cond;
+}
+
+static void gs_close(struct tty_struct *tty, struct file *file)
+{
+	struct gs_port *port = tty->driver_data;
+	struct gserial	*gser;
+	printk("gs_close, port->open_count:%d\n", port->open_count);
+	spin_lock_irq(&port->port_lock);
+
+	if (port->open_count != 1) {
+		if (port->open_count == 0)
+			WARN_ON(1);
+		else
+			--port->open_count;
+		goto exit;
+	}
+	spin_unlock_irq(&port->port_lock);
+	if(atomic_read(&port->gs_timer_inited) == 1){
+		atomic_set(&port->gs_timer_inited,0);
+		printk("gs_close del_timer_sync entry.\n");
+		del_timer_sync(&(port->rx_timer));
+		printk("gs_close del_timer_sync exit.\n");
+	}
+	spin_lock_irq(&port->port_lock);
+	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
+
+	/* mark port as closing but in use; we can drop port lock
+	 * and sleep if necessary
+	 */
+	port->openclose = true;
+	port->open_count = 0;
+
+	gser = port->port_usb;
+	if (gser && gser->disconnect)
+		gser->disconnect(gser);
+
+	/* wait for circular write buffer to drain, disconnect, or at
+	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
+	 */
+	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
+		spin_unlock_irq(&port->port_lock);
+		wait_event_interruptible_timeout(port->drain_wait,
+					gs_writes_finished(port),
+					GS_CLOSE_TIMEOUT * HZ);
+		spin_lock_irq(&port->port_lock);
+		gser = port->port_usb;
+	}
+
+	/* Iff we're disconnected, there can be no I/O in flight so it's
+	 * ok to free the circular buffer; else just scrub it.  And don't
+	 * let the push tasklet fire again until we're re-opened.
+	 */
+	if (gser == NULL)
+		gs_buf_free(&port->port_write_buf);
+	else
+		gs_buf_clear(&port->port_write_buf);
+
+	tty->driver_data = NULL;
+	port->port_tty = NULL;
+
+	port->openclose = false;
+	port->rx_push_run = false;
+	port->flip_tty_flag = 0;
+	
+	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
+			port->port_num, tty, file);
+
+	wake_up_interruptible(&port->close_wait);
+exit:
+	spin_unlock_irq(&port->port_lock);
+}
+
+static int gs_err_num = 0;
+static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	struct gs_port	*port = tty->driver_data;
+	unsigned long	flags;
+	int		status;
+
+	if (port == NULL)
+	{
+		return -ENODEV;
+	}
+
+	pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
+			port->port_num, tty, count);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (count)
+		count = gs_buf_put(&port->port_write_buf, buf, count);
+		
+	if(count < 0){
+		if(gs_err_num == 0){
+			usb_printk("gs_write error: write buffer full (no memory), ttyGS%d\n", port->port_num);
+		}
+		gs_err_num++;
+		if(gs_err_num == 3000)
+		    gs_err_num = 0;
+		//return count;
+	}
+	/* treat count == 0 as flush_chars() */
+	if (port->port_usb)
+		status = gs_start_tx(port);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return count;
+}
+
+static int gs_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	struct gs_port	*port = tty->driver_data;
+	unsigned long	flags;
+	int		status;
+	if (port == NULL)
+	{
+		return -ENODEV;
+	}
+
+	pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
+		port->port_num, tty, ch, __builtin_return_address(0));
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	status = gs_buf_put(&port->port_write_buf, &ch, 1);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return status;
+}
+
+static void gs_flush_chars(struct tty_struct *tty)
+{
+	struct gs_port	*port = tty->driver_data;
+	unsigned long	flags;
+	if (port == NULL)
+	{
+		return ;
+	}
+
+	pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb)
+		gs_start_tx(port);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int gs_write_room(struct tty_struct *tty)
+{
+	struct gs_port	*port = tty->driver_data;
+	unsigned long	flags;
+	int		room = 0;
+	if (port == NULL)
+	{
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb)
+		room = gs_buf_space_avail(&port->port_write_buf);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
+		port->port_num, tty, room);
+
+	return room;
+}
+
+static int gs_chars_in_buffer(struct tty_struct *tty)
+{
+	struct gs_port	*port = tty->driver_data;
+	unsigned long	flags;
+	int		chars = 0;
+	if (port == NULL)
+	{
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	chars = gs_buf_data_avail(&port->port_write_buf);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
+		port->port_num, tty, chars);
+
+	return chars;
+}
+
+/* undo side effects of setting TTY_THROTTLED */
+static void gs_unthrottle(struct tty_struct *tty)
+{
+	struct gs_port		*port = tty->driver_data;
+	unsigned long		flags;
+
+	if(port == NULL){
+		usb_printk("gs_unthrottle tty driver_data is NULL\n");
+		return;
+	}
+	
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb) {
+		/* Kickstart read queue processing.  We don't do xon/xoff,
+		 * rts/cts, or other handshaking with the host, but if the
+		 * read queue backs up enough we'll be NAKing OUT packets.
+		 */
+		tasklet_schedule(&port->push);
+		pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int gs_break_ctl(struct tty_struct *tty, int duration)
+{
+	struct gs_port	*port = tty->driver_data;
+	int		status = 0;
+	struct gserial	*gser;
+	if (port == NULL)
+	{
+		return -ENODEV;
+	}
+
+	pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
+			port->port_num, duration);
+
+	spin_lock_irq(&port->port_lock);
+	gser = port->port_usb;
+	if (gser && gser->send_break)
+		status = gser->send_break(gser, duration);
+	spin_unlock_irq(&port->port_lock);
+
+	return status;
+}
+
+static const struct tty_operations gs_tty_ops = {
+	.open =			gs_open,
+	.close =		gs_close,
+	.write =		gs_write,
+	.put_char =		gs_put_char,
+	.flush_chars =		gs_flush_chars,
+	.write_room =		gs_write_room,
+	.chars_in_buffer =	gs_chars_in_buffer,
+	.unthrottle =		gs_unthrottle,
+	.break_ctl =		gs_break_ctl,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static struct tty_driver *gs_tty_driver;
+
+static int
+gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
+{
+	struct gs_port	*port;
+
+	port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
+	if (port == NULL)
+		return -ENOMEM;
+
+	spin_lock_init(&port->port_lock);
+	init_waitqueue_head(&port->close_wait);
+	init_waitqueue_head(&port->drain_wait);
+
+	INIT_WORK(&port->online_inform_work, gserial_inform_online_work);
+	INIT_WORK(&port->offline_inform_work, gserial_inform_offline_work);
+	tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
+
+	INIT_LIST_HEAD(&port->read_pool);
+	INIT_LIST_HEAD(&port->read_queue);
+	INIT_LIST_HEAD(&port->write_pool);
+
+	port->port_num = port_num;
+	port->port_line_coding = *coding;
+	port->binded = 0;
+	atomic_set(&port->gs_timer_inited, 0);
+
+	ports[port_num].port = port;
+
+	return 0;
+}
+
+/**
+ * gserial_setup - initialize TTY driver for one or more ports
+ * @g: gadget to associate with these ports
+ * @count: how many ports to support
+ * Context: may sleep
+ *
+ * The TTY stack needs to know in advance how many devices it should
+ * plan to manage.  Use this call to set up the ports you will be
+ * exporting through USB.  Later, connect them to functions based
+ * on what configuration is activated by the USB host; and disconnect
+ * them as appropriate.
+ *
+ * An example would be a two-configuration device in which both
+ * configurations expose port 0, but through different functions.
+ * One configuration could even expose port 1 while the other
+ * one doesn't.
+ *
+ * Returns negative errno or zero.
+ */
+int gserial_setup(struct usb_gadget *g, unsigned count)
+{
+	unsigned			i;
+	struct usb_cdc_line_coding	coding;
+	int				status;
+
+	if (count == 0 || count > N_PORTS)
+		return -EINVAL;
+
+	if (gs_tty_driver) {
+		/* we need to support ACM and generic serial at the same
+		 * time, so don't allocate gs_tty_driver a second time
+		 */
+		return 0;
+	}
+
+	gs_tty_driver = alloc_tty_driver(count);
+
+	if (!gs_tty_driver)
+		return -ENOMEM;
+
+	gs_tty_driver->driver_name = "g_serial";
+	gs_tty_driver->name = PREFIX;
+	/* uses dynamically assigned dev_t values */
+
+	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	gs_tty_driver->init_termios = tty_std_termios;
+
+	/* 115200-8-N-1, raw mode.  (The upstream default was 9600-8-N-1 to
+	 * match "usbser.sys" on MS-Windows.)  Otherwise, most of these flags
+	 * shouldn't affect anything unless we were to actually hook up to a
+	 * serial line.
+	 */
+	memset(&gs_tty_driver->init_termios, 0, sizeof(gs_tty_driver->init_termios));
+	gs_tty_driver->init_termios.c_cflag =
+			B115200 | CS8 | CREAD | CLOCAL;
+	gs_tty_driver->init_termios.c_cflag &= ~CRTSCTS; //no flow control
+	gs_tty_driver->init_termios.c_cflag &= ~CSIZE; //control modes: clear the character size bits
+	gs_tty_driver->init_termios.c_cflag &= ~PARENB; //no parity check
+	gs_tty_driver->init_termios.c_cflag &= ~CSTOPB; //control modes: one stop bit
+	gs_tty_driver->init_termios.c_cflag &= ~OPOST; //output modes: raw output (OPOST is normally a c_oflag bit)
+	gs_tty_driver->init_termios.c_cc[VMIN] = 1; //control chars: minimum number of characters per read
+	gs_tty_driver->init_termios.c_cc[VTIME] = 1; //control chars: time to wait for the first character, unit: 1/10 second
+	gs_tty_driver->init_termios.c_ispeed = 115200;
+	gs_tty_driver->init_termios.c_ospeed = 115200;
+
+	coding.dwDTERate = cpu_to_le32(115200);
+	coding.bCharFormat = 8;
+	coding.bParityType = USB_CDC_NO_PARITY;
+	coding.bDataBits = USB_CDC_1_STOP_BITS;
+
+	tty_set_operations(gs_tty_driver, &gs_tty_ops);
+
+	/* make devices be openable */
+	for (i = 0; i < count; i++) {
+		mutex_init(&ports[i].lock);
+		status = gs_port_alloc(i, &coding);
+		if (status) {
+			count = i;
+			goto fail;
+		}
+	}
+	n_ports = count;
+
+	/* export the driver ... */
+	status = tty_register_driver(gs_tty_driver);
+	if (status) {
+		pr_err("%s: cannot register, err %d\n",
+				__func__, status);
+		goto fail;
+	}
+
+	/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
+	for (i = 0; i < count; i++) {
+		struct device	*tty_dev;
+
+		tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
+		if (IS_ERR(tty_dev))
+			pr_warning("%s: no classdev for port %d, err %ld\n",
+				__func__, i, PTR_ERR(tty_dev));
+		ports[i].tty_dev = tty_dev;
+	}
+
+	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
+			count, (count == 1) ? "" : "s");
+
+	return status;
+fail:
+	while (count--)
+		kfree(ports[count].port);
+	put_tty_driver(gs_tty_driver);
+	gs_tty_driver = NULL;
+	return status;
+}
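
For context, a gadget driver is expected to drive the lifecycle described above roughly as sketched below; this is an illustrative fragment only, and the my_serial_* names are hypothetical, not part of this patch:

static int __init my_serial_bind(struct usb_composite_dev *cdev)
{
	int status;

	/* one ttyGS* node per port; the count must not exceed N_PORTS */
	status = gserial_setup(cdev->gadget, 2);
	if (status < 0)
		return status;

	/* ... then add configurations, e.g. via acm_bind_config() ... */
	return 0;
}

static int __exit my_serial_unbind(struct usb_composite_dev *cdev)
{
	/* blocks until every open /dev/ttyGS* file has been closed */
	gserial_cleanup();
	return 0;
}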
+
+static int gs_closed(struct gs_port *port)
+{
+	int cond;
+
+	spin_lock_irq(&port->port_lock);
+	cond = (port->open_count == 0) && !port->openclose;
+	spin_unlock_irq(&port->port_lock);
+	return cond;
+}
+
+/**
+ * gserial_cleanup - remove TTY-over-USB driver and devices
+ * Context: may sleep
+ *
+ * This is called to free all resources allocated by @gserial_setup().
+ * Accordingly, it may need to wait until some open /dev/ files have
+ * closed.
+ *
+ * The caller must have issued @gserial_disconnect() for any ports
+ * that had previously been connected, so that there is never any
+ * I/O pending when it's called.
+ */
+void gserial_cleanup(void)
+{
+	unsigned	i;
+	struct gs_port	*port;
+
+	if (!gs_tty_driver)
+		return;
+
+	/* start sysfs and /dev/ttyGS* node removal */
+	for (i = 0; i < n_ports; i++)
+		tty_unregister_device(gs_tty_driver, i);
+
+	for (i = 0; i < n_ports; i++) {
+		/* prevent new opens */
+		mutex_lock(&ports[i].lock);
+		port = ports[i].port;
+		ports[i].port = NULL;
+		mutex_unlock(&ports[i].lock);
+
+		tasklet_kill(&port->push);
+
+		/* wait for old opens to finish */
+		wait_event(port->close_wait, gs_closed(port));
+
+		WARN_ON(port->port_usb != NULL);
+
+		kfree(port);
+	}
+	n_ports = 0;
+
+	tty_unregister_driver(gs_tty_driver);
+	put_tty_driver(gs_tty_driver);
+	gs_tty_driver = NULL;
+
+	pr_debug("%s: cleaned up ttyGS* support\n", __func__);
+}
+
+/**
+ * gserial_connect - notify TTY I/O glue that USB link is active
+ * @gser: the function, set up with endpoints and descriptors
+ * @port_num: which port is active
+ * Context: any (usually from irq)
+ *
+ * This is called to activate endpoints and let the TTY layer know that
+ * the connection is active ... not unlike "carrier detect".  It won't
+ * necessarily start I/O queues; unless the TTY is held open by any
+ * task, there would be no point.  However, the endpoints will be
+ * activated so the USB host can perform I/O, subject to basic USB
+ * hardware flow control.
+ *
+ * Caller needs to have set up the endpoints and USB function in @dev
+ * before calling this, as well as the appropriate (speed-specific)
+ * endpoint descriptors, and also have set up the TTY driver by calling
+ * @gserial_setup().
+ *
+ * Returns negative errno or zero.
+ * On success, ep->driver_data will be overwritten.
+ */
+int gserial_connect(struct gserial *gser, u8 *port_num)
+{
+	struct gs_port	*port = NULL;
+	unsigned long	flags;
+	int		status;
+	int port_line = 0;
+
+	if (!gs_tty_driver){
+		printk("gserial_connect, gs_tty_driver is null\n");
+		return -ENXIO;
+	}
+	for (port_line = 0; port_line < N_PORTS; port_line++) {
+		/* we "know" gserial_cleanup() hasn't been called */
+		printk("--gserial_connect--, ports[port_line].port->binded: %d\n",
+				ports[port_line].port->binded);
+		if (ports[port_line].port->binded == 0) {
+			port = ports[port_line].port;
+			*port_num = port_line;
+			port->binded = 1;
+			break;
+		}
+	}
+
+	if(!port)
+		return -ENXIO;	
+
+	/* activate the endpoints */
+	status = usb_ep_enable(gser->in);
+	if (status < 0)
+		return status;
+	gser->in->driver_data = port;
+
+	status = usb_ep_enable(gser->out);
+	if (status < 0)
+		goto fail_out;
+	gser->out->driver_data = port;
+
+	/* then tell the tty glue that I/O can work */
+	spin_lock_irqsave(&port->port_lock, flags);
+	gser->ioport = port;
+	port->port_usb = gser;
+
+	/* REVISIT unclear how best to handle this state...
+	 * we don't really couple it with the Linux TTY.
+	 */
+	gser->port_line_coding = port->port_line_coding;
+
+	/* REVISIT if waiting on "carrier detect", signal. */
+	usb_printk("gserial connect ttyGS%d\n", port->port_num);
+	schedule_work(&port->online_inform_work);
+#if 1
+	#ifndef CONFIG_SYSTEM_RECOVERY
+		g_bUsbDevHotAdd[port->port_num] = true;
+	#endif
+#endif
+	/* if it's already open, start I/O ... and notify the serial
+	 * protocol about open/close status (connect/disconnect).
+	 */
+	if (port->open_count) {
+		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
+		gs_start_io(port);
+		if (gser->connect)
+			gser->connect(gser);
+	} else {
+		if (gser->disconnect)
+			gser->disconnect(gser);
+	}
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return status;
+
+fail_out:
+	usb_ep_disable(gser->in);
+	gser->in->driver_data = NULL;
+	return status;
+}
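
A hedged sketch of the usual calling context: a serial-style USB function calls gserial_connect() from its set_alt() handler once the host selects the data interface, and gserial_disconnect() from disable(). The my_func_* names are hypothetical, and gser->in/gser->out are assumed to have been assigned endpoints during bind.

static int my_func_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct gserial *gser = container_of(f, struct gserial, func);
	u8 port_num;

	/* this vendor variant picks a free port and returns its number */
	return gserial_connect(gser, &port_num);
}

static void my_func_disable(struct usb_function *f)
{
	struct gserial *gser = container_of(f, struct gserial, func);

	gserial_disconnect(gser);
}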
+
+/**
+ * gserial_disconnect - notify TTY I/O glue that USB link is inactive
+ * @gser: the function, on which gserial_connect() was called
+ * Context: any (usually from irq)
+ *
+ * This is called to deactivate endpoints and let the TTY layer know
+ * that the connection went inactive ... not unlike "hangup".
+ *
+ * On return, the state is as if gserial_connect() had never been called;
+ * there is no active USB I/O on these endpoints.
+ */
+void gserial_disconnect(struct gserial *gser)
+{
+	struct gs_port	*port = gser->ioport;
+	unsigned long	flags;
+
+	if (!port)
+		return;
+
+	printk("gserial_disconnect, port off_report: %d\n", port->off_report);
+	/* tell the TTY glue not to do I/O here any more */
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->off_report == 0) {
+		usb_printk("gserial disconnect ttyGS%d\n", port->port_num);
+		schedule_work(&port->offline_inform_work);
+	}
+	port->off_report = 0;
+#if 1
+	#ifndef CONFIG_SYSTEM_RECOVERY
+		g_bUsbDevHotAdd[port->port_num] = false;
+	#endif
+#endif
+	/* REVISIT as above: how best to track this? */
+	port->port_line_coding = gser->port_line_coding;
+
+	port->port_usb = NULL;
+	gser->ioport = NULL;
+	if (port->open_count > 0 || port->openclose) {
+		wake_up_interruptible(&port->drain_wait);
+		if (port->port_tty)
+			tty_hangup(port->port_tty);
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* disable endpoints, aborting down any active I/O */
+	usb_ep_disable(gser->out);
+	gser->out->driver_data = NULL;
+
+	usb_ep_disable(gser->in);
+	gser->in->driver_data = NULL;
+
+	/* finally, free any unused/unusable I/O buffers */
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->open_count == 0 && !port->openclose)
+		gs_buf_free(&port->port_write_buf);
+	gs_free_requests(gser->out, &port->read_pool, NULL);
+	gs_free_requests(gser->out, &port->read_queue, NULL);
+	gs_free_requests(gser->in, &port->write_pool, NULL);
+
+	port->read_allocated = port->read_started = port->read_complete =
+		port->write_allocated = port->write_started = 0;
+	port->binded = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+void gserial_disconnect_ext(struct gserial *gser)
+{
+	struct gs_port	*port = gser->ioport;
+	unsigned long	flags;
+
+	if (!port)
+		return;
+
+	/* tell the TTY glue not to do I/O here any more */
+	spin_lock_irqsave(&port->port_lock, flags);
+	usb_printk("gserial disconnect ext ttyGS%d\n", port->port_num);
+#if 0	
+	if(n_ports == 4)
+	{
+	    if((port->port_num != 0) && (port->port_num != 2))
+    	{
+    	    schedule_work(&port->offline_inform_work);	
+			port->off_report = 1;
+    	}
+	}
+	else 
+	{
+		if(port->port_num != 0)
+		{
+			schedule_work(&port->offline_inform_work);	
+			port->off_report = 1;
+		}
+	}
+#else
+	schedule_work(&port->offline_inform_work);	
+	port->off_report = 1;
+#endif	
+	spin_unlock_irqrestore(&port->port_lock, flags);
+#if 1
+	#ifndef CONFIG_SYSTEM_RECOVERY
+		g_bUsbDevHotAdd[port->port_num] = false;
+	#endif
+#endif
+}
+
+void gserial_connect_ext(struct gserial *gser)
+{
+	struct gs_port	*port = gser->ioport;
+	unsigned long	flags;
+
+	if (!port)
+		return;
+
+	printk("port:0x%p, owner:0x%p\n", port, port->port_lock.lock.owner);
+	spin_lock_irqsave(&port->port_lock, flags);
+	usb_printk("gserial connect ext ttyGS%d\n", port->port_num);
+#if 0	
+	if(n_ports == 4)
+	{
+	    if((port->port_num != 0) && (port->port_num != 2))
+    	{
+    	    schedule_work(&port->online_inform_work);
+    	}
+	}
+	else if(port->port_num != 0)
+	{
+        schedule_work(&port->online_inform_work);
+	}
+#else
+	schedule_work(&port->online_inform_work);
+#endif	
+	spin_unlock_irqrestore(&port->port_lock, flags);
+#if 1
+	#ifndef CONFIG_SYSTEM_RECOVERY
+		g_bUsbDevHotAdd[port->port_num] = true;
+	#endif
+#endif
+}
+
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_serial.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_serial.h
new file mode 100644
index 0000000..d976067
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_serial.h
@@ -0,0 +1,68 @@
+/*
+ * u_serial.h - interface to USB gadget "serial port"/TTY utilities
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ */
+
+#ifndef __U_SERIAL_H
+#define __U_SERIAL_H
+
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+
+/*
+ * One non-multiplexed "serial" I/O port ... there can be several of these
+ * on any given USB peripheral device, if it provides enough endpoints.
+ *
+ * The "u_serial" utility component exists to do one thing:  manage TTY
+ * style I/O using the USB peripheral endpoints listed here, including
+ * hookups to sysfs and /dev for each logical "tty" device.
+ *
+ * REVISIT at least ACM could support tiocmget() if needed.
+ *
+ * REVISIT someday, allow multiplexing several TTYs over these endpoints.
+ */
+struct gserial {
+	struct usb_function		func;
+
+	/* port is managed by gserial_{connect,disconnect} */
+	struct gs_port			*ioport;
+
+	struct usb_ep			*in;
+	struct usb_ep			*out;
+
+	/* REVISIT avoid this CDC-ACM support harder ... */
+	struct usb_cdc_line_coding port_line_coding;	/* 9600-8-N-1 etc */
+
+	/* notification callbacks */
+	void (*connect)(struct gserial *p);
+	void (*disconnect)(struct gserial *p);
+	int (*send_break)(struct gserial *p, int duration);
+#ifdef CONFIG_PM
+	u32 suspend_state;
+#endif
+};
+
+/* utilities to allocate/free request and buffer */
+struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
+void gs_free_req(struct usb_ep *, struct usb_request *req);
+
+/* port setup/teardown is handled by gadget driver */
+int gserial_setup(struct usb_gadget *g, unsigned n_ports);
+void gserial_cleanup(void);
+
+/* connect/disconnect is handled by individual functions */
+int gserial_connect(struct gserial *, u8 *port_num);
+void gserial_disconnect(struct gserial *);
+
+/* functions are bound to configurations by a config or gadget driver */
+int acm_bind_config(struct usb_configuration *c, u8 port_num);
+int gser_bind_config(struct usb_configuration *c, u8 port_num);
+int obex_bind_config(struct usb_configuration *c, u8 port_num);
+
+#endif /* __U_SERIAL_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_uac1.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_uac1.c
new file mode 100644
index 0000000..af98989
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_uac1.c
@@ -0,0 +1,327 @@
+/*
+ * u_uac1.c -- ALSA audio utilities for Gadget stack
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/random.h>
+#include <linux/syscalls.h>
+
+#include "u_uac1.h"
+
+/*
+ * This component encapsulates the ALSA devices for USB audio gadget
+ */
+
+#define FILE_PCM_PLAYBACK	"/dev/snd/pcmC0D0p"
+#define FILE_PCM_CAPTURE	"/dev/snd/pcmC0D0c"
+#define FILE_CONTROL		"/dev/snd/controlC0"
+
+static char *fn_play = FILE_PCM_PLAYBACK;
+module_param(fn_play, charp, S_IRUGO);
+MODULE_PARM_DESC(fn_play, "Playback PCM device file name");
+
+static char *fn_cap = FILE_PCM_CAPTURE;
+module_param(fn_cap, charp, S_IRUGO);
+MODULE_PARM_DESC(fn_cap, "Capture PCM device file name");
+
+static char *fn_cntl = FILE_CONTROL;
+module_param(fn_cntl, charp, S_IRUGO);
+MODULE_PARM_DESC(fn_cntl, "Control device file name");
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * Some ALSA internal helper functions
+ */
+static int snd_interval_refine_set(struct snd_interval *i, unsigned int val)
+{
+	struct snd_interval t;
+	t.empty = 0;
+	t.min = t.max = val;
+	t.openmin = t.openmax = 0;
+	t.integer = 1;
+	return snd_interval_refine(i, &t);
+}
+
+static int _snd_pcm_hw_param_set(struct snd_pcm_hw_params *params,
+				 snd_pcm_hw_param_t var, unsigned int val,
+				 int dir)
+{
+	int changed;
+	if (hw_is_mask(var)) {
+		struct snd_mask *m = hw_param_mask(params, var);
+		if (val == 0 && dir < 0) {
+			changed = -EINVAL;
+			snd_mask_none(m);
+		} else {
+			if (dir > 0)
+				val++;
+			else if (dir < 0)
+				val--;
+			changed = snd_mask_refine_set(
+					hw_param_mask(params, var), val);
+		}
+	} else if (hw_is_interval(var)) {
+		struct snd_interval *i = hw_param_interval(params, var);
+		if (val == 0 && dir < 0) {
+			changed = -EINVAL;
+			snd_interval_none(i);
+		} else if (dir == 0)
+			changed = snd_interval_refine_set(i, val);
+		else {
+			struct snd_interval t;
+			t.openmin = 1;
+			t.openmax = 1;
+			t.empty = 0;
+			t.integer = 0;
+			if (dir < 0) {
+				t.min = val - 1;
+				t.max = val;
+			} else {
+				t.min = val;
+				t.max = val+1;
+			}
+			changed = snd_interval_refine(i, &t);
+		}
+	} else
+		return -EINVAL;
+	if (changed) {
+		params->cmask |= 1 << var;
+		params->rmask |= 1 << var;
+	}
+	return changed;
+}
+/*-------------------------------------------------------------------------*/
+
+/**
+ * Set default hardware params
+ */
+static int playback_default_hw_params(struct gaudio_snd_dev *snd)
+{
+	struct snd_pcm_substream *substream = snd->substream;
+	struct snd_pcm_hw_params *params;
+	snd_pcm_sframes_t result;
+
+       /*
+	* SNDRV_PCM_ACCESS_RW_INTERLEAVED,
+	* SNDRV_PCM_FORMAT_S16_LE
+	* CHANNELS: 2
+	* RATE: 48000
+	*/
+	snd->access = SNDRV_PCM_ACCESS_RW_INTERLEAVED;
+	snd->format = SNDRV_PCM_FORMAT_S16_LE;
+	snd->channels = 2;
+	snd->rate = 48000;
+
+	params = kzalloc(sizeof(*params), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
+	_snd_pcm_hw_params_any(params);
+	_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_ACCESS,
+			snd->access, 0);
+	_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			snd->format, 0);
+	_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_CHANNELS,
+			snd->channels, 0);
+	_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_RATE,
+			snd->rate, 0);
+
+	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, params);
+
+	result = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
+	if (result < 0) {
+		ERROR(snd->card,
+			"Preparing sound card failed: %d\n", (int)result);
+		kfree(params);
+		return result;
+	}
+
+	/* Store the hardware parameters */
+	snd->access = params_access(params);
+	snd->format = params_format(params);
+	snd->channels = params_channels(params);
+	snd->rate = params_rate(params);
+
+	kfree(params);
+
+	INFO(snd->card,
+		"Hardware params: access %x, format %x, channels %d, rate %d\n",
+		snd->access, snd->format, snd->channels, snd->rate);
+
+	return 0;
+}
+
+/**
+ * Playback audio buffer data by ALSA PCM device
+ */
+static size_t u_audio_playback(struct gaudio *card, void *buf, size_t count)
+{
+	struct gaudio_snd_dev	*snd = &card->playback;
+	struct snd_pcm_substream *substream = snd->substream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	mm_segment_t old_fs;
+	ssize_t result;
+	snd_pcm_sframes_t frames;
+
+try_again:
+	if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
+		runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
+		result = snd_pcm_kernel_ioctl(substream,
+				SNDRV_PCM_IOCTL_PREPARE, NULL);
+		if (result < 0) {
+			ERROR(card, "Preparing sound card failed: %d\n",
+					(int)result);
+			return result;
+		}
+	}
+
+	frames = bytes_to_frames(runtime, count);
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	result = snd_pcm_lib_write(snd->substream, buf, frames);
+	if (result != frames) {
+		ERROR(card, "Playback error: %d\n", (int)result);
+		set_fs(old_fs);
+		goto try_again;
+	}
+	set_fs(old_fs);
+
+	return 0;
+}
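
Because the ALSA write above may sleep, callers of u_audio_playback() (such as the audio function driver, which is normally built into the same translation unit as this file) feed it from process context rather than directly from the isochronous completion handler. A minimal sketch with hypothetical names:

struct my_audio_ctx {
	struct gaudio		card;
	struct work_struct	playback_work;
	void			*copy_buf;	/* filled by the ISO OUT completion handler */
	size_t			copy_len;
};

static void my_playback_work(struct work_struct *work)
{
	struct my_audio_ctx *ctx =
		container_of(work, struct my_audio_ctx, playback_work);

	/* process context, so the potentially blocking PCM write is allowed */
	if (ctx->copy_len)
		u_audio_playback(&ctx->card, ctx->copy_buf, ctx->copy_len);
}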
+
+static int u_audio_get_playback_channels(struct gaudio *card)
+{
+	return card->playback.channels;
+}
+
+static int u_audio_get_playback_rate(struct gaudio *card)
+{
+	return card->playback.rate;
+}
+
+/**
+ * Open ALSA PCM and control device files
+ * Initialize the PCM or control device
+ */
+static int gaudio_open_snd_dev(struct gaudio *card)
+{
+	struct snd_pcm_file *pcm_file;
+	struct gaudio_snd_dev *snd;
+
+	if (!card)
+		return -ENODEV;
+
+	/* Open control device */
+	snd = &card->control;
+	snd->filp = filp_open(fn_cntl, O_RDWR, 0);
+	if (IS_ERR(snd->filp)) {
+		int ret = PTR_ERR(snd->filp);
+		ERROR(card, "unable to open sound control device file: %s\n",
+				fn_cntl);
+		snd->filp = NULL;
+		return ret;
+	}
+	snd->card = card;
+
+	/* Open PCM playback device and setup substream */
+	snd = &card->playback;
+	snd->filp = filp_open(fn_play, O_WRONLY, 0);
+	if (IS_ERR(snd->filp)) {
+		ERROR(card, "No such PCM playback device: %s\n", fn_play);
+		snd->filp = NULL;
+	}
+	pcm_file = snd->filp->private_data;
+	snd->substream = pcm_file->substream;
+	snd->card = card;
+	playback_default_hw_params(snd);
+
+	/* Open PCM capture device and setup substream */
+	snd = &card->capture;
+	snd->filp = filp_open(fn_cap, O_RDONLY, 0);
+	if (IS_ERR(snd->filp)) {
+		ERROR(card, "No such PCM capture device: %s\n", fn_cap);
+		snd->substream = NULL;
+		snd->card = NULL;
+		snd->filp = NULL;
+	} else {
+		pcm_file = snd->filp->private_data;
+		snd->substream = pcm_file->substream;
+		snd->card = card;
+	}
+
+	return 0;
+}
+
+/**
+ * Close ALSA PCM and control device files
+ */
+static int gaudio_close_snd_dev(struct gaudio *gau)
+{
+	struct gaudio_snd_dev	*snd;
+
+	/* Close control device */
+	snd = &gau->control;
+	if (snd->filp)
+		filp_close(snd->filp, current->files);
+
+	/* Close PCM playback device and setup substream */
+	snd = &gau->playback;
+	if (snd->filp)
+		filp_close(snd->filp, current->files);
+
+	/* Close PCM capture device and setup substream */
+	snd = &gau->capture;
+	if (snd->filp)
+		filp_close(snd->filp, current->files);
+
+	return 0;
+}
+
+static struct gaudio *the_card;
+/**
+ * gaudio_setup - setup ALSA interface and preparing for USB transfer
+ *
+ * This sets up the PCM, mixer or MIDI ALSA devices for the USB gadget to use.
+ *
+ * Returns negative errno, or zero on success
+ */
+int __init gaudio_setup(struct gaudio *card)
+{
+	int	ret;
+
+	ret = gaudio_open_snd_dev(card);
+	if (ret)
+		ERROR(card, "we need at least one control device\n");
+	else if (!the_card)
+		the_card = card;
+
+	return ret;
+
+}
+
+/**
+ * gaudio_cleanup - remove ALSA device interface
+ *
+ * This is called to free all resources allocated by @gaudio_setup().
+ */
+void gaudio_cleanup(void)
+{
+	if (the_card) {
+		gaudio_close_snd_dev(the_card);
+		the_card = NULL;
+	}
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_uac1.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_uac1.h
new file mode 100644
index 0000000..18c2e72
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_uac1.h
@@ -0,0 +1,56 @@
+/*
+ * u_uac1.h -- interface to USB gadget "ALSA AUDIO" utilities
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __U_AUDIO_H
+#define __U_AUDIO_H
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/composite.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "gadget_chips.h"
+
+/*
+ * This represents the USB side of an audio card device, managed by a USB
+ * function which provides control and stream interfaces.
+ */
+
+struct gaudio_snd_dev {
+	struct gaudio			*card;
+	struct file			*filp;
+	struct snd_pcm_substream	*substream;
+	int				access;
+	int				format;
+	int				channels;
+	int				rate;
+};
+
+struct gaudio {
+	struct usb_function		func;
+	struct usb_gadget		*gadget;
+
+	/* ALSA sound device interfaces */
+	struct gaudio_snd_dev		control;
+	struct gaudio_snd_dev		playback;
+	struct gaudio_snd_dev		capture;
+
+	/* TODO */
+};
+
+int gaudio_setup(struct gaudio *card);
+void gaudio_cleanup(void);
+
+#endif /* __U_AUDIO_H */
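
A short sketch of how an audio function's bind path might use this interface; the names are illustrative and error handling is abbreviated (gaudio_setup() opens the ALSA control and PCM device files configured in u_uac1.c):

static struct gaudio my_audio_card;	/* hypothetical instance */

static int my_audio_bind_config(struct usb_configuration *c)
{
	int ret;

	my_audio_card.gadget = c->cdev->gadget;
	ret = gaudio_setup(&my_audio_card);
	if (ret < 0)
		return ret;

	/* ... then register the streaming/control usb_function ... */
	return 0;
}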
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/udc-core.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/udc-core.c
new file mode 100644
index 0000000..dfaf9fb
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/udc-core.c
@@ -0,0 +1,549 @@
+/**
+ * udc.c - Core UDC Framework
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2  of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <mach/highspeed_debug.h>
+
+#ifndef DEBUG
+#define DEBUG
+#endif
+#define pr_debug(fmt, ...) \
+	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+/**
+ * struct usb_udc - describes one usb device controller
+ * @driver - the gadget driver pointer. For use by the class code
+ * @dev - the child device to the actual controller
+ * @gadget - the gadget. For use by the class code
+ * @list - for use by the udc class driver
+ *
+ * This represents the internal data structure which is used by the UDC-class
+ * to hold information about udc driver and gadget together.
+ */
+struct usb_udc {
+	struct usb_gadget_driver	*driver;
+	struct usb_gadget		*gadget;
+	struct device			dev;
+	struct list_head		list;
+};
+
+static struct class *udc_class;
+static LIST_HEAD(udc_list);
+static DEFINE_MUTEX(udc_lock);
+
+/* ------------------------------------------------------------------------- */
+
+int usb_gadget_map_request(struct usb_gadget *gadget,
+		struct usb_request *req, int is_in)
+{
+	if (req->length == 0)
+		return 0;
+
+	if (req->num_sgs) {
+		int     mapped;
+
+		mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs,
+				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		if (mapped == 0) {
+			dev_err(&gadget->dev, "failed to map SGs\n");
+			return -EFAULT;
+		}
+
+		req->num_mapped_sgs = mapped;
+	} else {
+		req->dma = dma_map_single(&gadget->dev, req->buf, req->length,
+				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+		if (dma_mapping_error(&gadget->dev, req->dma)) {
+			dev_err(&gadget->dev, "failed to map buffer\n");
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_map_request);
+
+void usb_gadget_unmap_request(struct usb_gadget *gadget,
+		struct usb_request *req, int is_in)
+{
+	if (req->length == 0)
+		return;
+
+	if (req->num_mapped_sgs) {
+		dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs,
+				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+		req->num_mapped_sgs = 0;
+	} else {
+		dma_unmap_single(&gadget->dev, req->dma, req->length,
+				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	}
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
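
A DMA-capable peripheral controller driver would typically pair these helpers around its hardware transfer: map in its ep_queue() method, unmap when the request completes. The sketch below is illustrative only; struct my_ep and its fields are hypothetical.

struct my_ep {
	struct usb_ep		ep;
	struct usb_gadget	*gadget;
	int			is_in;
};

static int my_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp)
{
	struct my_ep *mep = container_of(ep, struct my_ep, ep);
	int ret;

	ret = usb_gadget_map_request(mep->gadget, req, mep->is_in);
	if (ret)
		return ret;

	/* ... program the DMA engine with req->dma or req->sg ... */
	return 0;
}

static void my_req_done(struct my_ep *mep, struct usb_request *req, int status)
{
	usb_gadget_unmap_request(mep->gadget, req, mep->is_in);
	req->status = status;
	req->complete(&mep->ep, req);
}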
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_gadget_start - tells usb device controller to start up
+ * @gadget: The gadget we want to get started
+ * @driver: The driver we want to bind to @gadget
+ * @bind: The bind function for @driver
+ *
+ * This call is issued by the UDC Class driver when it's about
+ * to register a gadget driver to the device controller, before
+ * calling gadget driver's bind() method.
+ *
+ * It allows the controller to be powered off until strictly
+ * necessary to have it powered on.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_start(struct usb_gadget *gadget,
+		struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	return gadget->ops->start(driver, bind);
+}
+
+/**
+ * usb_gadget_udc_start - tells usb device controller to start up
+ * @gadget: The gadget we want to get started
+ * @driver: The driver we want to bind to @gadget
+ *
+ * This call is issued by the UDC Class driver when it's about
+ * to register a gadget driver to the device controller, before
+ * calling gadget driver's bind() method.
+ *
+ * It allows the controller to be powered off until strictly
+ * necessary to have it powered on.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_udc_start(struct usb_gadget *gadget,
+		struct usb_gadget_driver *driver)
+{
+	return gadget->ops->udc_start(gadget, driver);
+}
+
+/**
+ * usb_gadget_stop - tells usb device controller we don't need it anymore
+ * @gadget: The device we want to stop activity
+ * @driver: The driver to unbind from @gadget
+ *
+ * This call is issued by the UDC Class driver after calling
+ * gadget driver's unbind() method.
+ *
+ * The details are implementation specific, but it can go as
+ * far as powering off UDC completely and disable its data
+ * line pullups.
+ */
+static inline void usb_gadget_stop(struct usb_gadget *gadget,
+		struct usb_gadget_driver *driver)
+{
+	gadget->ops->stop(driver);
+}
+
+/**
+ * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
+ * @gadget: The device we want to stop activity
+ * @driver: The driver to unbind from @gadget
+ *
+ * This call is issued by the UDC Class driver after calling
+ * gadget driver's unbind() method.
+ *
+ * The details are implementation specific, but it can go as
+ * far as powering off UDC completely and disable its data
+ * line pullups.
+ */
+static inline void usb_gadget_udc_stop(struct usb_gadget *gadget,
+		struct usb_gadget_driver *driver)
+{
+	gadget->ops->udc_stop(gadget, driver);
+}
+
+/**
+ * usb_udc_release - release the usb_udc struct
+ * @dev: the dev member within usb_udc
+ *
+ * This is called by driver's core in order to free memory once the last
+ * reference is released.
+ */
+static void usb_udc_release(struct device *dev)
+{
+	struct usb_udc *udc;
+
+	udc = container_of(dev, struct usb_udc, dev);
+	dev_dbg(dev, "releasing '%s'\n", dev_name(dev));
+	kfree(udc);
+}
+
+static const struct attribute_group *usb_udc_attr_groups[];
+/**
+ * usb_add_gadget_udc - adds a new gadget to the udc class driver list
+ * @parent: the parent device to this udc. Usually the controller
+ * driver's device.
+ * @gadget: the gadget to be added to the list
+ *
+ * Returns zero on success, negative errno otherwise.
+ */
+int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
+{
+	struct usb_udc		*udc;
+	int			ret = -ENOMEM;
+
+	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
+	if (!udc)
+		goto err1;
+
+	device_initialize(&udc->dev);
+	udc->dev.release = usb_udc_release;
+	udc->dev.class = udc_class;
+	udc->dev.groups = usb_udc_attr_groups;
+	udc->dev.parent = parent;
+	ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
+	if (ret)
+		goto err2;
+
+	udc->gadget = gadget;
+
+	mutex_lock(&udc_lock);
+	list_add_tail(&udc->list, &udc_list);
+
+	ret = device_add(&udc->dev);
+	if (ret)
+		goto err3;
+
+	mutex_unlock(&udc_lock);
+
+	return 0;
+err3:
+	list_del(&udc->list);
+	mutex_unlock(&udc_lock);
+
+err2:
+	put_device(&udc->dev);
+
+err1:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
+
+static int udc_is_newstyle(struct usb_udc *udc)
+{
+	if (udc->gadget->ops->udc_start && udc->gadget->ops->udc_stop)
+		return 1;
+	return 0;
+}
+
+
+static void usb_gadget_remove_driver(struct usb_udc *udc)
+{
+	dev_dbg(&udc->dev, "unregistering UDC driver [%s]\n",
+			udc->gadget->name);
+
+	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+
+	if (udc_is_newstyle(udc)) {
+		udc->driver->disconnect(udc->gadget);
+		usb_gadget_disconnect(udc->gadget);
+		udc->driver->unbind(udc->gadget);
+		usb_gadget_udc_stop(udc->gadget, NULL);
+		
+		device_del(&udc->gadget->dev); // xjy add
+	} else {
+		usb_gadget_stop(udc->gadget, udc->driver);
+	}
+
+	udc->driver = NULL;
+	udc->dev.driver = NULL;
+}
+
+/**
+ * usb_del_gadget_udc - deletes @udc from udc_list
+ * @gadget: the gadget to be removed.
+ *
+ * This will call usb_gadget_unregister_driver() if
+ * the @udc is still busy.
+ */
+void usb_del_gadget_udc(struct usb_gadget *gadget)
+{
+	struct usb_udc		*udc = NULL;
+
+	mutex_lock(&udc_lock);
+	list_for_each_entry(udc, &udc_list, list)
+		if (udc->gadget == gadget)
+			goto found;
+
+	dev_err(gadget->dev.parent, "gadget not registered.\n");
+	mutex_unlock(&udc_lock);
+
+	return;
+
+found:
+	dev_vdbg(gadget->dev.parent, "unregistering gadget\n");
+
+	list_del(&udc->list);
+	mutex_unlock(&udc_lock);
+
+	if (udc->driver)
+		usb_gadget_remove_driver(udc);
+
+	kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
+	device_unregister(&udc->dev);
+}
+EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
+
+/* ------------------------------------------------------------------------- */
+
+int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct usb_udc		*udc = NULL;
+	int			ret;
+
+	if (!driver || !bind || !driver->setup)
+		return -EINVAL;
+
+	mutex_lock(&udc_lock);
+	list_for_each_entry(udc, &udc_list, list) {
+		/* For now we take the first one */
+		if (!udc->driver)
+			goto found;
+	}
+
+	pr_debug("couldn't find an available UDC\n");
+	mutex_unlock(&udc_lock);
+	return -ENODEV;
+
+found:
+	dev_dbg(&udc->dev, "registering UDC driver [%s]\n",
+			driver->function);
+
+	udc->driver = driver;
+	udc->dev.driver = &driver->driver;
+
+	if (udc_is_newstyle(udc)) {
+
+		ret = device_add(&udc->gadget->dev); // xjy add
+		ret = bind(udc->gadget);
+		if (ret)
+			goto err1;
+		ret = usb_gadget_udc_start(udc->gadget, driver);
+		if (ret) {
+			driver->unbind(udc->gadget);
+			goto err1;
+		}
+		//usb_gadget_connect(udc->gadget);  //test
+	} else {
+
+		ret = usb_gadget_start(udc->gadget, driver, bind);
+		if (ret)
+			goto err1;
+
+	}
+
+	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+	mutex_unlock(&udc_lock);
+	return 0;
+
+err1:
+	dev_err(&udc->dev, "failed to start %s: %d\n",
+			udc->driver->function, ret);
+	udc->driver = NULL;
+	udc->dev.driver = NULL;
+	mutex_unlock(&udc_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_probe_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	struct usb_udc		*udc = NULL;
+	int			ret = -ENODEV;
+
+	if (!driver || !driver->unbind)
+		return -EINVAL;
+
+	mutex_lock(&udc_lock);
+	list_for_each_entry(udc, &udc_list, list)
+		if (udc->driver == driver) {
+			usb_gadget_remove_driver(udc);
+			ret = 0;
+			break;
+		}
+
+	mutex_unlock(&udc_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t usb_udc_srp_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t n)
+{
+	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev);
+
+	if (sysfs_streq(buf, "1"))
+		usb_gadget_wakeup(udc->gadget);
+
+	return n;
+}
+static DEVICE_ATTR(srp, S_IWUSR, NULL, usb_udc_srp_store);
+
+static ssize_t usb_udc_softconn_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t n)
+{
+	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev);
+
+	if (sysfs_streq(buf, "connect")) {
+		if (udc_is_newstyle(udc))
+			usb_gadget_udc_start(udc->gadget, udc->driver);
+		usb_gadget_connect(udc->gadget);
+	} else if (sysfs_streq(buf, "disconnect")) {
+		usb_gadget_disconnect(udc->gadget);
+		if (udc_is_newstyle(udc))
+			usb_gadget_udc_stop(udc->gadget, udc->driver);
+	} else {
+		dev_err(dev, "unsupported command '%s'\n", buf);
+		return -EINVAL;
+	}
+
+	return n;
+}
+static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
+
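
From userspace, the soft_connect attribute above can be driven as follows; the controller name under /sys/class/udc/ is board specific and the one used here is hypothetical.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int udc_soft_connect(int connect)
{
	const char *cmd = connect ? "connect" : "disconnect";
	int fd = open("/sys/class/udc/my-udc.0/soft_connect", O_WRONLY);

	if (fd < 0)
		return -1;
	write(fd, cmd, strlen(cmd));
	close(fd);
	return 0;
}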
+#define USB_UDC_SPEED_ATTR(name, param)					\
+ssize_t usb_udc_##param##_show(struct device *dev,			\
+		struct device_attribute *attr, char *buf)		\
+{									\
+	struct usb_udc *udc = container_of(dev, struct usb_udc, dev);	\
+	return snprintf(buf, PAGE_SIZE, "%s\n",				\
+			usb_speed_string(udc->gadget->param));		\
+}									\
+static DEVICE_ATTR(name, S_IRUSR, usb_udc_##param##_show, NULL)
+
+static USB_UDC_SPEED_ATTR(current_speed, speed);
+static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);
+
+/* TODO: Scheduled for removal in 3.8. */
+static ssize_t usb_udc_is_dualspeed_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev);
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			gadget_is_dualspeed(udc->gadget));
+}
+static DEVICE_ATTR(is_dualspeed, S_IRUSR, usb_udc_is_dualspeed_show, NULL);
+
+#define USB_UDC_ATTR(name)					\
+ssize_t usb_udc_##name##_show(struct device *dev,		\
+		struct device_attribute *attr, char *buf)	\
+{								\
+	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev); \
+	struct usb_gadget	*gadget = udc->gadget;		\
+								\
+	return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name);	\
+}								\
+static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL)
+
+static USB_UDC_ATTR(is_otg);
+static USB_UDC_ATTR(is_a_peripheral);
+static USB_UDC_ATTR(b_hnp_enable);
+static USB_UDC_ATTR(a_hnp_support);
+static USB_UDC_ATTR(a_alt_hnp_support);
+
+static struct attribute *usb_udc_attrs[] = {
+	&dev_attr_srp.attr,
+	&dev_attr_soft_connect.attr,
+	&dev_attr_current_speed.attr,
+	&dev_attr_maximum_speed.attr,
+
+	&dev_attr_is_dualspeed.attr,
+	&dev_attr_is_otg.attr,
+	&dev_attr_is_a_peripheral.attr,
+	&dev_attr_b_hnp_enable.attr,
+	&dev_attr_a_hnp_support.attr,
+	&dev_attr_a_alt_hnp_support.attr,
+	NULL,
+};
+
+static const struct attribute_group usb_udc_attr_group = {
+	.attrs = usb_udc_attrs,
+};
+
+static const struct attribute_group *usb_udc_attr_groups[] = {
+	&usb_udc_attr_group,
+	NULL,
+};
+
+static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev);
+	int			ret;
+
+	ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name);
+	if (ret) {
+		dev_err(dev, "failed to add uevent USB_UDC_NAME\n");
+		return ret;
+	}
+
+	if (udc->driver) {
+		ret = add_uevent_var(env, "USB_UDC_DRIVER=%s",
+				udc->driver->function);
+		if (ret) {
+			dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int __init usb_udc_init(void)
+{
+	udc_class = class_create(THIS_MODULE, "udc");
+	if (IS_ERR(udc_class)) {
+		pr_err("failed to create udc class --> %ld\n",
+				PTR_ERR(udc_class));
+		return PTR_ERR(udc_class);
+	}
+
+	udc_class->dev_uevent = usb_udc_uevent;
+	return 0;
+}
+subsys_initcall(usb_udc_init);
+
+static void __exit usb_udc_exit(void)
+{
+	class_destroy(udc_class);
+}
+module_exit(usb_udc_exit);
+
+MODULE_DESCRIPTION("UDC Framework");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
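
For reference, a peripheral controller driver registers its gadget with this framework from probe() and removes it on remove(); the platform-driver sketch below is illustrative, with struct my_udc and its setup details left hypothetical.

struct my_udc {
	struct usb_gadget	gadget;
	/* controller registers, clocks, irq, ... */
};

static int my_udc_probe(struct platform_device *pdev)
{
	struct my_udc *udc;
	int ret;

	udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	/* fill in udc->gadget.ops, name, ep0 and ep_list before registering */
	ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, udc);
	return 0;
}

static int my_udc_remove(struct platform_device *pdev)
{
	struct my_udc *udc = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&udc->gadget);
	return 0;
}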
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usb_netlink.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usb_netlink.c
new file mode 100755
index 0000000..649d5ec
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usb_netlink.c
@@ -0,0 +1,857 @@
+

+/*

+|| sanchips add: xjy@20150312 for usbconfig-hotplug

+*/

+

+#include <linux/device.h>

+#include <linux/module.h>

+#include <linux/kernel.h>

+

+#include <linux/init.h>

+

+#include <linux/string.h>

+

+#include <linux/sysfs.h>

+

+#include <linux/stat.h>

+#include <linux/slab.h>

+#include <linux/kobject.h>

+#include <linux/android_notify.h>

+#include <mach/highspeed_debug.h>

+#include <mach/iomap.h>

+

+extern int detected_charger(void);

+static unsigned int charger_plug = 0;

+static unsigned int usb_plug = 0;

+//static unsigned int sys_id = 1;//0 is windows, other value not windows(mac or linux)

+static unsigned int en_mods = 0;//enable mods

+//static unsigned int auto_eject_ms = 0;

+unsigned int hotplug_flag = 1;

+unsigned int usb_printk_en = 1;

+EXPORT_SYMBOL_GPL(usb_printk_en);

+unsigned int mmc_printk_en = 1;

+unsigned int gmac_printk_en = 1;

+

+unsigned int force_net = 0;

+unsigned int set_panic = 0;

+//for auto test,mdl dev with gpio detect usb plug in/out

+unsigned int usb_gpio_detect_enable = 0;

+//added by gsn: limit how many skbs may back up on the usb vincPkt_list

+unsigned int rndis_vplist_max = 800;

+

+static unsigned int ramdump_flag = 0;

+#define RNDIS_NUM    4

+unsigned int usblan[RNDIS_NUM] = {0,0,0,0};

+

+#define CHARGER_PLUG_NAME	"chargerPlug"

+#define USB_PLUG_NAME		"usbPlug"

+#define SYS_ID_NAME			"sysId"

+#define ENABLE_MODS		"enMods"

+#define NET_NAME           "netname"

+#define NET0_STATE           "usblan0"

+#define NET1_STATE           "usblan1"

+#define NET2_STATE           "usblan2"

+#define NET3_STATE           "usblan3"

+#define HOT_PLUG           "hotplug"

+#define USB_PRINTK_EN           "usb"

+#define MMC_PRINTK_EN           "mmc"

+#define GMAC_PRINTK_EN           "gmac"

+

+#define FORCE_NET          "forcenet"

+#define SET_PANIC			"set_panic"

+#define RAMDUMP_FLAG		"ramdumpFlag"

+#define RNDIS_VPLST_MAX     "lstmax"

+#define USB_GPIO_DETECT_ENABLE "gpio_detect"

+

+#define USB_LOG_MEM_SIZE	(15*1000)

+#define USB_LOG_MAX_SIZE	512

+

+static char	s_usbMemLog[USB_LOG_MEM_SIZE]= {0};

+static char	*s_localStringBuf = NULL;

+static int		s_usbMemLogIndex=0;

+#define SET_ERARLY_SUSPEND_PANIC	     0x00000001

+#define SET_BNA_ERR_PANIC			     0x00000002

+#define SET_TX_REQ_USEUP_PANIC		     0x00000004

+#define SET_RNDIS_RESET_MSG_PANIC		 0x00000008

+

+

+static struct attribute charger_plug_attr =

+{

+        .name = "chargerPlug",

+        .mode = S_IRUGO|S_IWUSR,

+};

+

+static struct attribute usb_plug_attr =

+{

+        .name = "usbPlug",

+        .mode = S_IRUGO|S_IWUSR,

+};

+

+static struct attribute sys_id_attr =

+{

+        .name = "sysId",

+        .mode = S_IRUGO|S_IWUSR,

+};

+

+static struct attribute en_mods_attr =

+{

+        .name = "enMods",

+        .mode = S_IRUGO|S_IWUSR,

+};

+static struct attribute ramdump_flag_attr =

+{

+        .name = "ramdumpFlag",

+        .mode = S_IRUGO|S_IWUSR,

+};

+static struct attribute net_name_attr =

+{

+        .name = "netname",

+        .mode = S_IRUGO|S_IWUSR,

+};

+static struct attribute net0_state_attr =

+{

+        .name = "usblan0",

+        .mode = S_IRUGO|S_IWUSR,

+};

+static struct attribute net1_state_attr =

+{

+        .name = "usblan1",

+        .mode = S_IRUGO|S_IWUSR,

+};

+static struct attribute net2_state_attr =

+{

+        .name = "usblan2",

+        .mode = S_IRUGO|S_IWUSR,

+};

+static struct attribute net3_state_attr =

+{

+        .name = "usblan3",

+        .mode = S_IRUGO|S_IWUSR,

+};

+

+static struct attribute hotplug_state_attr =

+{

+        .name = "hotplug",

+        .mode = S_IRUGO|S_IWUSR,

+};

+

+static struct attribute force_net_attr =

+{

+        .name = "forcenet",

+        .mode = S_IRUGO|S_IWUSR,

+};

+

+static struct attribute set_panic_attr =

+{

+        .name = "set_panic",

+        .mode = S_IRUGO|S_IWUSR,

+};

+

+

+static struct attribute list_max_attr =

+{

+		.name = "lstmax",

+		.mode = S_IRUGO|S_IWUSR,

+};

+

+

+static struct attribute usb_gpio_detect_enable_attr =

+{

+		.name = "gpio_detect",

+		.mode = S_IRUGO|S_IWUSR,

+};

+

+static struct attribute *usb_status_attrs[] =

+{

+	&charger_plug_attr,

+	&usb_plug_attr,

+	&sys_id_attr,

+	&en_mods_attr,

+	&ramdump_flag_attr,

+	&net_name_attr,

+	&net0_state_attr,

+	&net1_state_attr,

+	&net2_state_attr,

+	&net3_state_attr,

+	&hotplug_state_attr,

+	&force_net_attr,

+	&set_panic_attr,

+	&list_max_attr,

+	&usb_gpio_detect_enable_attr,

+       NULL,

+};

+

+

+static struct attribute usbprintk_state_attr =

+{

+        .name = "usb",

+        .mode = S_IRUGO|S_IWUSR,

+};

+

+static struct attribute mmcprintk_state_attr =

+{

+        .name = "mmc",

+        .mode = S_IRUGO|S_IWUSR,

+};

+

+static struct attribute gmacprintk_state_attr =

+{

+        .name = "gmac",

+        .mode = S_IRUGO|S_IWUSR,

+};

+

+static struct attribute *print_status_attrs[] =

+{

+	&usbprintk_state_attr,

+	&mmcprintk_state_attr,

+	&gmacprintk_state_attr,

+       NULL,

+};

+

+

+/***********************************************

+

+				USB MODS 

+				

+************************************************/

+

+struct usb_mods{

+

+	/* CD-ROM-only flag */

+	int only_cdrom;

+

+	/* manual CD-ROM eject flag */

+	int is_eject_cdrom; 

+

+	/* automatic CD-ROM eject flag */

+	int auto_eject_cdrom;

+	int start_timer_flag;

+	struct timer_list	timer;

+	struct work_struct work;

+

+	/* host OS identification flag */

+	int sys_id;	//0 is windows, other value not windows(mac or linux)

+};

+

+static struct usb_mods mods;

+void usb_mods_activate(void);

+

+/* The MODS CD-ROM auto-eject delay is normally in the 3~5 second range; use 3 seconds for now */

+#define USB_MODS_TIMER_EXPIRES 	3000 

+

+

+int usb_do_reject_cdrom(void)

+{

+	if(mods.only_cdrom){

+		USBSTACK_DBG("usb_do_reject_cdrom!!!! ");

+		mods.is_eject_cdrom = 1;

+	}

+    return 1;

+}

+

+int usb_is_reject_cdrom(void)

+{

+	if(mods.is_eject_cdrom){

+		USBSTACK_DBG("usb_is_reject_cdrom!!!! ");

+		usb_notify_up(USB_CDROM_OBJECT, NULL);

+		return 1;

+	}

+

+	usb_mods_activate();

+	return 0;

+}

+void usb_set_sys_id(int sysId)

+{

+	if(sysId){

+		if(mods.sys_id != sysId)

+			USBSTACK_DBG("sys is mac or linux");

+	}else{

+		if(mods.sys_id != sysId)

+			USBSTACK_DBG("sys is windows");

+	}

+	mods.sys_id = sysId;

+}

+

+void usb_set_ms_auto_reject(int flag)

+{

+	if(mods.auto_eject_cdrom == flag)

+	{

+		//USBSTACK_DBG("already set, set mods-eject-cdrom flag: %d", flag);

+		return;

+	}

+

+	if(en_mods){

+		USBSTACK_DBG("mods enable, set mods-eject-cdrom flag: %d", flag);

+		mods.auto_eject_cdrom = flag;

+	}else{

+		USBSTACK_DBG("mods disable, set mods-eject-cdrom flag: 0");

+		mods.auto_eject_cdrom = 0;

+	}

+}

+//EXPORT_SYMBOL_GPL(usb_set_ms_auto_eject);

+

+

+int usb_get_ms_auto_reject(void)

+{

+	if(en_mods)

+		return mods.auto_eject_cdrom;

+	else

+		return 0;

+}

+//EXPORT_SYMBOL_GPL(usb_get_ms_auto_reject);

+

+

+void usb_mods_activate(void)

+{

+	unsigned long expire;

+	

+	if(mods.auto_eject_cdrom){

+		//mods.beginTime = jiffies;

+		if(mods.start_timer_flag == 0){

+			expire = msecs_to_jiffies(USB_MODS_TIMER_EXPIRES) + jiffies;

+			mods.start_timer_flag = 1;

+			mod_timer(&mods.timer, expire);

+			USBSTACK_DBG("mods timer start");

+		}

+	}

+}

+

+void usb_mods_deactive(void)

+{	

+	mods.start_timer_flag = 0;

+	del_timer_sync(&mods.timer);

+}

+		

+void usb_mods_timer_callback(unsigned long data)

+{

+	USBSTACK_DBG("usb mods timer");

+	schedule_work(&mods.work);

+}

+static void usb_mods_work(struct work_struct *data)

+{

+	USBSTACK_DBG("usb mods work");

+	if(usb_plug)

+		usb_notify_up(USB_CDROM_OBJECT, NULL);

+}

+

+void usb_mods_init(void)

+{

+	USBSTACK_DBG("usb_mods_init");

+	mods.only_cdrom = 1;

+	mods.auto_eject_cdrom =en_mods;

+	mods.sys_id = 1;

+	mods.start_timer_flag = 0;

+	setup_timer(&mods.timer, usb_mods_timer_callback, (unsigned long)(&mods));

+	INIT_WORK(&mods.work, usb_mods_work);

+}

+

+

+void usb_mods_exit(void)

+{

+	USBSTACK_DBG("usb_mods_exit");

+	usb_mods_deactive();

+	//flush_work_sync(&mods.work);

+	mods.only_cdrom =0;

+	mods.auto_eject_cdrom =0;

+	mods.is_eject_cdrom = 0;

+}

+

+/************usb  mods end ****************************/

+unsigned int get_panic_flag(void)

+{

+	return set_panic;

+}

+

+EXPORT_SYMBOL_GPL(get_panic_flag);

+

+unsigned int get_usb_gpio_detect_flag(void)

+{

+	return usb_gpio_detect_enable;

+}

+

+EXPORT_SYMBOL_GPL(get_usb_gpio_detect_flag);

+

+

+int usb_get_rndis_list_max_flag(void)

+{

+	return rndis_vplist_max;

+}

+EXPORT_SYMBOL_GPL(usb_get_rndis_list_max_flag);

+

+

+static void usb_ramdump_config(void)

+{

+	usb_notify_up(USB_RAMDUMP_TRIGGER, NULL);	

+}

+ssize_t kobj_usb_show(struct kobject *kobject,struct attribute *attr,char *buf)

+{

+	int dc=0;

+

+	  if(!strcmp(attr->name, CHARGER_PLUG_NAME)){

+	  		sprintf(buf, "%d\n",charger_plug );

+	  }else if(!strcmp(attr->name, USB_PLUG_NAME)){

+	  		dc = detected_charger();

+	  	  	USBSTACK_DBG("detected charger=%d",dc);

+			if(dc == 1)

+	              	usb_plug = 1;

+	  		sprintf(buf, "%d\n",usb_plug );

+	  }else if(!strcmp(attr->name, SYS_ID_NAME)){

+	  		sprintf(buf, "%d\n",mods.sys_id );

+	  }else if(!strcmp(attr->name, ENABLE_MODS)){

+	  		sprintf(buf, "%d\n",en_mods );

+	  }else if(!strcmp(attr->name, RAMDUMP_FLAG)){

+	  		sprintf(buf, "%d\n",ramdump_flag);

+	  }else if(!strcmp(attr->name, NET_NAME)){

+	  		sprintf(buf,"%s%s%s%s",usblan[0]?"usblan0;":"",usblan[1]?"usblan1;":"",usblan[2]?"usblan2;":"",usblan[3]?"usblan3;":"");

+	  }else if(!strcmp(attr->name, NET0_STATE)){

+	  		sprintf(buf, "%d",usblan[0]);

+	  }else if(!strcmp(attr->name, NET1_STATE)){

+	  		sprintf(buf, "%d",usblan[1]);

+	  }else if(!strcmp(attr->name, NET2_STATE)){

+	  		sprintf(buf, "%d",usblan[2]);

+	  }else if(!strcmp(attr->name, NET3_STATE)){

+	  		sprintf(buf, "%d",usblan[3]);

+	  }else if(!strcmp(attr->name, HOT_PLUG)){

+	  		sprintf(buf, "%d",hotplug_flag);

+	  }else if(!strcmp(attr->name, USB_PRINTK_EN)){

+	  		sprintf(buf, "%d",usb_printk_en);

+	  }else if(!strcmp(attr->name, FORCE_NET)){

+	  		sprintf(buf, "%d",force_net);

+	  }else if(!strcmp(attr->name, SET_PANIC)){

+	  		sprintf(buf, "%u",set_panic);

+	  }else if(!strcmp(attr->name, RNDIS_VPLST_MAX)){

+	  		 sprintf(buf, "%u",rndis_vplist_max);

+	  } else if(!strcmp(attr->name, USB_GPIO_DETECT_ENABLE)){

+	  		 sprintf(buf, "%u",usb_gpio_detect_enable);

+	  } 	  

+

+      return strlen(buf);

+}

+

+//void usbPoll_test(void);

+ssize_t kobj_usb_store(struct kobject *kobject,struct attribute *attr, const char *buf,size_t size)

+{

+	unsigned int value = 0;

+	

+	value = simple_strtoul(buf, NULL, 10);

+	if(!strcmp(attr->name,CHARGER_PLUG_NAME)){

+		charger_plug = value;

+	}else if(!strcmp(attr->name,USB_PLUG_NAME)){

+		//usbPoll_test();

+		usb_plug = value;

+	}else if(!strcmp(attr->name,SYS_ID_NAME)){

+		usb_set_sys_id(value);

+	}else if(!strcmp(attr->name,ENABLE_MODS)){

+		en_mods =value;

+	}else if(!strcmp(attr->name,RAMDUMP_FLAG)){

+		ramdump_flag =value;

+		if(ramdump_flag)

+			usb_ramdump_config();

+	}else if(!strcmp(attr->name,NET0_STATE)){

+		usblan[0] =value;

+	}

+	else if(!strcmp(attr->name,NET1_STATE)){

+		usblan[1] =value;

+	}else if(!strcmp(attr->name,NET2_STATE)){

+		usblan[2] =value;

+	}else if(!strcmp(attr->name,NET3_STATE)){

+		usblan[3] =value;

+	}else if(!strcmp(attr->name,HOT_PLUG)){

+		hotplug_flag =value;

+	}else if(!strcmp(attr->name,USB_PRINTK_EN)){

+		usb_printk_en =value;

+	}else if(!strcmp(attr->name,FORCE_NET)){

+		force_net =value;	

+	}else if(!strcmp(attr->name,SET_PANIC)){

+		set_panic =value;

+	}else if(!strcmp(attr->name,RNDIS_VPLST_MAX)){

+		rndis_vplist_max =value;

+	}else if(!strcmp(attr->name,USB_GPIO_DETECT_ENABLE)){

+		usb_gpio_detect_enable =value;

+	}

+	

+	return size;

+}

+

+static struct sysfs_ops obj_usb_sysops =

+{

+        .show = kobj_usb_show,

+        .store = kobj_usb_store,        

+};

+

+void obj_usb_release(struct kobject *kobject)

+{

+	usb_printk("[kobj_test: release!]\n");

+}

+static struct kobj_type ktype =

+

+{       .release = obj_usb_release,

+        .sysfs_ops = &obj_usb_sysops,

+        .default_attrs = usb_status_attrs,

+};
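
Since the "dwc_usb" kset is created at the sysfs root, the attributes listed in usb_status_attrs end up under /sys/dwc_usb/usbconfig/. A small userspace sketch (path derived from this file, not verified on hardware):

#include <stdio.h>

static int read_usb_plug(void)
{
	int plugged = 0;
	FILE *f = fopen("/sys/dwc_usb/usbconfig/usbPlug", "r");

	if (!f)
		return -1;
	if (fscanf(f, "%d", &plugged) != 1)
		plugged = -1;
	fclose(f);
	return plugged;	/* 1: USB cable present, 0: not present */
}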

+

+ssize_t kobj_print_show(struct kobject *kobject,struct attribute *attr,char *buf)

+{

+	  if(!strcmp(attr->name, USB_PRINTK_EN)){

+	  		sprintf(buf, "%d",usb_printk_en);

+	  }else if(!strcmp(attr->name, MMC_PRINTK_EN)){

+	  		sprintf(buf, "%d",mmc_printk_en);

+	  }else if(!strcmp(attr->name, GMAC_PRINTK_EN)){

+	  		sprintf(buf, "%d",gmac_printk_en);

+	  }

+

+      return strlen(buf);

+}

+

+//void usbPoll_test(void);

+ssize_t kobj_print_store(struct kobject *kobject,struct attribute *attr, const char *buf,size_t size)

+{

+	unsigned int value = 0;

+	

+	value = simple_strtoul(buf, NULL, 10);

+	if(!strcmp(attr->name,USB_PRINTK_EN)){

+		usb_printk_en =value;

+	}else if(!strcmp(attr->name,MMC_PRINTK_EN)){

+		mmc_printk_en =value;

+	}else if(!strcmp(attr->name,GMAC_PRINTK_EN)){

+		gmac_printk_en =value;

+	}

+	

+	return size;

+}

+

+static struct sysfs_ops obj_print_sysops =

+{

+        .show = kobj_print_show,

+        .store = kobj_print_store,        

+};

+static struct kobj_type kprinttype =

+

+{       .release = obj_usb_release,

+        .sysfs_ops = &obj_print_sysops,

+        .default_attrs = print_status_attrs,

+};

+

+static int kset_filter(struct kset *kset,struct kobject *kobj)

+{

+//    int ret=0;

+//    struct kobj_type *ktype = get_ktype(kobj); /* get the attribute type */

+//    ret = (ktype == &ktype_part);

+   usb_printk("Filter: kobj %s.\n",kobj->name);

+     return 1;

+}

+

+static const char *kset_name(struct kset *kset,struct kobject *kobj)

+{    

+    static char buf[20];

+

+/*    struct device *dev = to_dev(kobj);

+   if(dev->bus)

+        return dev->bus->name;

+    else if(dev->class)

+        return dev->class->name;

+    else

+*/    {

+       usb_printk("Name kobj %s.\n",kobj->name);

+       sprintf(buf,"%s","dwc_usb");

+   }

+       return buf;

+}

+

+static int kset_uevent(struct kset *kset,struct kobject *kobj, struct kobj_uevent_env *env)

+{

+   int i = 0;

+    usb_printk("uevent: kobj %s.\n",kobj->name);

+

+   while(i < env->envp_idx)

+    {

+        usb_printk("%s.\n",env->envp[i]);

+       i ++;

+    }

+

+    return 0;

+}

+

+static struct kset_uevent_ops uevent_ops =

+{

+    .filter = kset_filter,

+    .name = kset_name,

+    .uevent = kset_uevent,

+};

+

+struct kset *kset_p;

+struct kset *kset_usb;

+//struct kset kset_c;

+struct kobject *usbkobj = NULL;

+struct kobject *printkobj = NULL;

+static int __init kset_usb_init(void)

+{

+  int ret = 0;

+

+   usb_printk("kset test init!\n");

+   

+   /* create and register the kset */

+   kset_usb = kset_create_and_add("dwc_usb", &uevent_ops, NULL);    

+    

+  // kobject_set_name(&kset_c.kobj,"kset_c");

+ //  kset_c.kobj.kset = kset_p;    /* add kset_c to kset_p */

+

+    /* For newer kernels, the ktype member of kset.kobj needs to be

+        * filled in before registering the kset, otherwise registration fails */

+  // kset_c.kobj.ktype = &ktype;

+   //ret = kset_register(&kset_c);

+

+  // if(ret)

+   //     kset_unregister(kset_p);

+   usbkobj = kzalloc(sizeof(*usbkobj),GFP_KERNEL);

+   if(!usbkobj){

+   		usb_printk(KERN_WARNING "kzalloc usbkobj failed\n");

+		return 0;

+   }

+   kobject_init(usbkobj, &ktype);

+   kobject_add(usbkobj,&kset_usb->kobj,"%s","usbconfig");

+   usbkobj->kset = kset_usb;

+

+    kset_p = kset_create_and_add("highspeed_print_en", &uevent_ops, NULL);    

+	

+     printkobj = kzalloc(sizeof(*printkobj),GFP_KERNEL);

+   if(!printkobj){

+   		usb_printk(KERN_WARNING "kzalloc printkobj failed\n");

+		return 0;

+   }

+   kobject_init(printkobj, &kprinttype);

+   kobject_add(printkobj,&kset_p->kobj,"%s","printconfig");

+   printkobj->kset = kset_p;

+

+  // usbkobj = kobject_create_and_add("usbconfig", &kset_p->kobj);

+  // usbkobj->kset = kset_p;

+   //usbkobj->ktype = &ktype;

+    

+   return ret;

+}

+

+

+static void __exit kset_usb_exit(void)

+{

+   usb_printk("kset test exit!\n");

+   

+  // kset_unregister(&kset_c);

+   kset_unregister(kset_usb);

+   kset_unregister(kset_p);

+}

+

+

+void usb_notify_up(usb_notify_event notify_type, void* puf)

+{

+	unsigned char buf[100];

+	int rtv = -1;

+	enum kobject_action action =KOBJ_MAX;

+	char*envp_ext[] = {NULL,NULL};

+	strcpy(buf, "cdrom-object");

+	switch(notify_type){

+		case USB_CDROM_OBJECT:

+			usb_printk("usb cdrom object \n");

+			USBSTACK_DBG("@CDROM REJECT");

+			strcpy(buf,"USBEVENT=usb_switch");

+			action = KOBJ_CHANGE;

+			break;

+

+		case USB_DEVICE_PLUGIN:

+			usb_printk("usb device plugin \n");

+			USBSTACK_DBG("@USB DEV PLUG IN");

+			strcpy(buf,"USBEVENT=usb_device_in");

+			action = KOBJ_ADD;

+			usb_plug = 1;

+			break;

+

+		case USB_DEVICE_PLUGOUT:

+			usb_printk("usb device plugout \n");

+			USBSTACK_DBG("@USB DEV PLUG OUT");

+			strcpy(buf,"USBEVENT=usb_device_out");

+			action = KOBJ_REMOVE;

+			usb_plug = 0;

+			break;

+		case USB_CHARGER_PLUGIN:

+			usb_printk("usb charger plugin \n");

+			USBSTACK_DBG("@USB CHARGER PLUG IN");

+			strcpy(buf,"USBEVENT=usb_charger_in");

+			action = KOBJ_ADD;

+			charger_plug = 1;

+			break;

+

+		case USB_CHARGER_PLUGOUT:

+			usb_printk("usb charger plugout \n");

+			USBSTACK_DBG("@USB CHARGER PLUG OUT");

+			strcpy(buf,"USBEVENT=usb_charger_out");

+			action = KOBJ_REMOVE;

+			charger_plug = 0;

+			break;

+		case USB_RAMDUMP_TRIGGER:

+			usb_printk("usb ramdump trigger \n");

+			USBSTACK_DBG("@USB RAMDUMP TRIGGER");

+			 strcpy(buf,"USBEVENT=usb_ramdump");

+			 action = KOBJ_CHANGE;

+             break;

+		case USB_SWITCH_USER:

+			usb_printk("usb switch to user mode \n");

+			USBSTACK_DBG("@USB SWITCH USER");

+			strcpy(buf,"USBEVENT=usb_user");

+			action = KOBJ_CHANGE;

+			break;

+

+		case USB_SWITCH_DEBUG:

+			usb_printk("usb switch to debug mode \n");

+			USBSTACK_DBG("@USB SWITCH DEBUG");

+			strcpy(buf,"USBEVENT=usb_debug");

+			action = KOBJ_CHANGE;

+			break;

+

+		case USB_SWITCH_FACTORY:

+			usb_printk("usb switch to factory mode \n");

+			USBSTACK_DBG("@USB SWITCH FACTORY");

+			strcpy(buf,"USBEVENT=usb_factory");

+			action = KOBJ_CHANGE;

+			break;

+

+		case USB_SWITCH_AMT:

+			usb_printk("usb switch to amt mode \n");

+			USBSTACK_DBG("@USB SWITCH AMT");

+			strcpy(buf,"USBEVENT=usb_amt");

+			action = KOBJ_CHANGE;

+			break;

+

+		case USB_SWITCH_EYE_DIAGRAM:

+			usb_printk("usb switch to EYE_DIAGRAM mode \n");

+			USBSTACK_DBG("@USB SWITCH EYE_DIAGRAM");

+			strcpy(buf,"USBEVENT=usb_eye_diagram");

+			action = KOBJ_CHANGE;

+			break;

+

+		case USB_DEVICE_EXCEPT_RESET:

+			usb_printk("usb catch except reset \n");

+			USBSTACK_DBG("@USB EXCEPT RESET");

+			strcpy(buf,"USBEVENT=usb_except_reset");

+			action = KOBJ_CHANGE;

+			break;

+			 

+		default:

+			usb_printk(KERN_WARNING "UNKNOWN USB EVENT \n");

+			break;

+	}

+    envp_ext[0] = buf;

+	if(usbkobj && hotplug_flag){

+		rtv = kobject_uevent_env(usbkobj, action,envp_ext);

+	}

+	usb_printk(KERN_WARNING "rtv:%d \n",rtv);

+}

+EXPORT_SYMBOL_GPL(usb_notify_up);
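+
+/*
+ * Illustrative caller, a minimal sketch only: "vbus_present" and "is_charger"
+ * are hypothetical helpers a detection path might provide, they do not exist
+ * in this driver.
+ *
+ *	if (!vbus_present())
+ *		usb_notify_up(USB_DEVICE_PLUGOUT, NULL);
+ *	else if (is_charger())
+ *		usb_notify_up(USB_CHARGER_PLUGIN, NULL);
+ *	else
+ *		usb_notify_up(USB_DEVICE_PLUGIN, NULL);
+ *
+ * Each call is delivered to userspace as a kobject uevent on the "usbconfig"
+ * kobject, carrying a USBEVENT=... environment string as built in the switch
+ * statement above.
+ */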

+

+

+

+void usb_dbg_printf(const char *fmt,...)

+{

+    va_list args;

+    int stringCnt = 0;

+

+    if ((USB_LOG_MAX_SIZE +s_usbMemLogIndex)>=USB_LOG_MEM_SIZE)

+    {

+        s_usbMemLogIndex=0;

+    }

+	

+    s_localStringBuf = &s_usbMemLog[s_usbMemLogIndex];

+

+

+    va_start(args, fmt);

+    stringCnt+=vsprintf((char *)s_localStringBuf, fmt, args);

+    va_end(args);

+	

+    s_usbMemLogIndex+=stringCnt;

+}

+EXPORT_SYMBOL_GPL(usb_dbg_printf);

+

+void usb_dbg_showLog(void)

+{

+	int SingleStrLen=0;

+	char logBuf[USB_LOG_MAX_SIZE+1]={0};

+	char *pStart=s_usbMemLog;

+	char *pEnd=NULL;

+

+	logBuf[USB_LOG_MAX_SIZE] = '\n';

+

+	pEnd = (char *)strchr((const char *)pStart, '\n');

+	while((pEnd < (&s_usbMemLog[USB_LOG_MEM_SIZE]))&&(pEnd >=pStart))

+	{

+		SingleStrLen= pEnd-pStart;	

+		memcpy(logBuf,pStart,SingleStrLen);

+		printk("%s\n", logBuf);	

+		

+		do{

+			pStart=pEnd?(pEnd+1):(pStart+1);

+			memset(logBuf,0,USB_LOG_MAX_SIZE);

+			pEnd = (char *)strchr((const char *)pStart, '\n');

+		}while(!pEnd);

+	}

+

+}

+EXPORT_SYMBOL_GPL(usb_dbg_showLog);

+

+void usb_dbg_ep0reg(void)

+{

+#if 0

+    USBREG_DBG("\n GINTSTS:  0x%08x, GINTMASK: 0x%08x, DCFG:     0x%08x, DCTL:     0x%08x,\n DSTS:     0x%08x, DIEPMSK:  0x%08x, DOEPMSK:  0x%08x, DAINT:     0x%08x,\n DAINTMSK: 0x%08x, DIEPCTL0: 0x%08x, DIEPINT0: 0x%08x, DIEPTSIZE0:0x%08x,\n DIEPDMA0: 0x%08x, DIEPDMB0: 0x%08x, DOEPCTL0: 0x%08x, DOEPINT0: 0x%08x,\n DOEPSIZ0: 0x%08x, DOEPDMA0: 0x%08x, unused:   0x%08x, DOEPDMAB0:0x%08x,", 

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x14), //GINTSTS

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x18), //GINTMSK

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x800), //DCFG

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x804), //DCTL  line1 end

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x808), //DSTS

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x810), //DIEPMSK 

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x814), //DOEPMSK

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x818), //DAINT   line2 end

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x81c), //DAINTMSK

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x900), //DIEPCTL0

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x908), //DIEPINT0

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x910), //DIEPTSIZE0  line3 end

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x914), //DIEPDMA0

+        *(volatile unsigned int *)(ZX_USB_BASE+0x91C), //DIEPDMB0

+    	*(volatile unsigned int *)(ZX_USB_BASE+0xB00), //DOEPCTL0

+    	*(volatile unsigned int *)(ZX_USB_BASE+0xB08), //DOEPINT0   line4 end

+    	*(volatile unsigned int *)(ZX_USB_BASE+0xB10), //DOEPSIZ0

+    	*(volatile unsigned int *)(ZX_USB_BASE+0xB14), //DOEPDMA0

+    	*(volatile unsigned int *)(ZX_USB_BASE+0xB18), //unused

+    	*(volatile unsigned int *)(ZX_USB_BASE+0xB1C) //DOEPDMAB0  line5 end

+    	);

+#endif

+}

+EXPORT_SYMBOL_GPL(usb_dbg_ep0reg);

+

+void usb_print_ep0reg(void)

+{

+    USBHAL_DBG("\n GINTSTS:  0x%08x, GINTMASK: 0x%08x, DCFG:     0x%08x, DCTL:     0x%08x,\n DSTS:     0x%08x, DIEPMSK:  0x%08x, DOEPMSK:  0x%08x, DAINT:     0x%08x,\n DAINTMSK: 0x%08x, DIEPCTL0: 0x%08x, DIEPINT0: 0x%08x, DIEPTSIZE0:0x%08x,\n DIEPDMA0: 0x%08x, DIEPDMB0: 0x%08x, DOEPCTL0: 0x%08x, DOEPINT0: 0x%08x,\n DOEPSIZ0: 0x%08x, DOEPDMA0: 0x%08x, unused:   0x%08x, DOEPDMAB0:0x%08x,", 

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x14), //GINTSTS

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x18), //GINTMSK

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x800), //DCFG

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x804), //DCTL  line1 end

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x808), //DSTS

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x810), //DIEPMSK 

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x814), //DOEPMSK

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x818), //DAINT   line2 end

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x81c), //DAINTMSK

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x900), //DIEPCTL0

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x908), //DIEPINT0

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x910), //DIEPTSIZE0  line3 end

+    	*(volatile unsigned int *)(ZX_USB_BASE+0x914), //DIEPDMA0

+        *(volatile unsigned int *)(ZX_USB_BASE+0x91C), //DIEPDMB0

+    	*(volatile unsigned int *)(ZX_USB_BASE+0xB00), //DOEPCTL0

+    	*(volatile unsigned int *)(ZX_USB_BASE+0xB08), //DOEPINT0   line4 end

+    	*(volatile unsigned int *)(ZX_USB_BASE+0xB10), //DOEPSIZ0

+    	*(volatile unsigned int *)(ZX_USB_BASE+0xB14), //DOEPDMA0

+    	*(volatile unsigned int *)(ZX_USB_BASE+0xB18), //unused

+    	*(volatile unsigned int *)(ZX_USB_BASE+0xB1C) //DOEPDMAB0  line5 end

+    	);

+}

+EXPORT_SYMBOL_GPL(usb_print_ep0reg);

+module_init(kset_usb_init);

+

+module_exit(kset_usb_exit);

+

+

diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usbstring.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usbstring.c
new file mode 100644
index 0000000..4d25b90
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usbstring.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2003 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/nls.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+
+/**
+ * usb_gadget_get_string - fill out a string descriptor 
+ * @table: of c strings encoded using UTF-8
+ * @id: string id, from low byte of wValue in get string descriptor
+ * @buf: at least 256 bytes, must be 16-bit aligned
+ *
+ * Finds the UTF-8 string matching the ID, and converts it into a
+ * string descriptor in utf16-le.
+ * Returns length of descriptor (always even) or negative errno
+ *
+ * If your driver needs strings in multiple languages, you'll probably
+ * "switch (wIndex) { ... }"  in your ep0 string descriptor logic,
+ * using this routine after choosing which set of UTF-8 strings to use.
+ * Note that US-ASCII is a strict subset of UTF-8; any string bytes with
+ * the eighth bit set will be multibyte UTF-8 characters, not ISO-8859/1
+ * characters (which are also widely used in C strings).
+ */
+int
+usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf)
+{
+	struct usb_string	*s;
+	int			len;
+
+	/* descriptor 0 has the language id */
+	if (id == 0) {
+		buf [0] = 4;
+		buf [1] = USB_DT_STRING;
+		buf [2] = (u8) table->language;
+		buf [3] = (u8) (table->language >> 8);
+		return 4;
+	}
+	for (s = table->strings; s && s->s; s++)
+		if (s->id == id)
+			break;
+
+	/* unrecognized: stall. */
+	if (!s || !s->s)
+		return -EINVAL;
+
+	/* string descriptors have length, tag, then UTF16-LE text */
+	len = min ((size_t) 126, strlen (s->s));
+	len = utf8s_to_utf16s(s->s, len, UTF16_LITTLE_ENDIAN,
+			(wchar_t *) &buf[2], 126);
+	if (len < 0)
+		return -EINVAL;
+	buf [0] = (len + 1) * 2;
+	buf [1] = USB_DT_STRING;
+	return buf [0];
+}
+
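+/*
+ * Illustrative use, as a sketch: the table and identifiers below are
+ * hypothetical, though the webcam gadget later in this patch builds an
+ * equivalent table with language 0x0409 (en-US).
+ *
+ *	static struct usb_string strs[] = {
+ *		{ .id = 1, .s = "Example Manufacturer" },
+ *		{ }
+ *	};
+ *	static struct usb_gadget_strings table = {
+ *		.language	= 0x0409,
+ *		.strings	= strs,
+ *	};
+ *
+ *	In the ep0 GET_DESCRIPTOR(string) handler:
+ *	len = usb_gadget_get_string(&table, wValue & 0xff, req->buf);
+ */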
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc.h
new file mode 100644
index 0000000..ca4e03a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc.h
@@ -0,0 +1,199 @@
+/*
+ *	uvc_gadget.h  --  USB Video Class Gadget driver
+ *
+ *	Copyright (C) 2009-2010
+ *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ */
+
+#ifndef _UVC_GADGET_H_
+#define _UVC_GADGET_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+
+#define UVC_EVENT_FIRST			(V4L2_EVENT_PRIVATE_START + 0)
+#define UVC_EVENT_CONNECT		(V4L2_EVENT_PRIVATE_START + 0)
+#define UVC_EVENT_DISCONNECT		(V4L2_EVENT_PRIVATE_START + 1)
+#define UVC_EVENT_STREAMON		(V4L2_EVENT_PRIVATE_START + 2)
+#define UVC_EVENT_STREAMOFF		(V4L2_EVENT_PRIVATE_START + 3)
+#define UVC_EVENT_SETUP			(V4L2_EVENT_PRIVATE_START + 4)
+#define UVC_EVENT_DATA			(V4L2_EVENT_PRIVATE_START + 5)
+#define UVC_EVENT_LAST			(V4L2_EVENT_PRIVATE_START + 5)
+
+struct uvc_request_data
+{
+	__s32 length;
+	__u8 data[60];
+};
+
+struct uvc_event
+{
+	union {
+		enum usb_device_speed speed;
+		struct usb_ctrlrequest req;
+		struct uvc_request_data data;
+	};
+};
+
+#define UVCIOC_SEND_RESPONSE		_IOW('U', 1, struct uvc_request_data)
+
+#define UVC_INTF_CONTROL		0
+#define UVC_INTF_STREAMING		1
+
+/* ------------------------------------------------------------------------
+ * Debugging, printing and logging
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/usb.h>	/* For usb_endpoint_* */
+#include <linux/usb/gadget.h>
+#include <linux/videodev2.h>
+#include <linux/version.h>
+#include <media/v4l2-fh.h>
+
+#include "uvc_queue.h"
+
+#define UVC_TRACE_PROBE				(1 << 0)
+#define UVC_TRACE_DESCR				(1 << 1)
+#define UVC_TRACE_CONTROL			(1 << 2)
+#define UVC_TRACE_FORMAT			(1 << 3)
+#define UVC_TRACE_CAPTURE			(1 << 4)
+#define UVC_TRACE_CALLS				(1 << 5)
+#define UVC_TRACE_IOCTL				(1 << 6)
+#define UVC_TRACE_FRAME				(1 << 7)
+#define UVC_TRACE_SUSPEND			(1 << 8)
+#define UVC_TRACE_STATUS			(1 << 9)
+
+#define UVC_WARN_MINMAX				0
+#define UVC_WARN_PROBE_DEF			1
+
+extern unsigned int uvc_gadget_trace_param;
+
+#define uvc_trace(flag, msg...) \
+	do { \
+		if (uvc_gadget_trace_param & flag) \
+			printk(KERN_DEBUG "uvcvideo: " msg); \
+	} while (0)
+
+#define uvc_warn_once(dev, warn, msg...) \
+	do { \
+		if (!test_and_set_bit(warn, &dev->warnings)) \
+			printk(KERN_INFO "uvcvideo: " msg); \
+	} while (0)
+
+#define uvc_printk(level, msg...) \
+	printk(level "uvcvideo: " msg)
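+
+/*
+ * Illustrative use, a sketch only ("index" is a placeholder value): once the
+ * UVC_TRACE_CAPTURE bit is set in uvc_gadget_trace_param, a code path can
+ * emit a trace message with
+ *
+ *	uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", index);
+ */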
+
+/* ------------------------------------------------------------------------
+ * Driver specific constants
+ */
+
+#define DRIVER_VERSION				"0.1.0"
+#define DRIVER_VERSION_NUMBER			KERNEL_VERSION(0, 1, 0)
+
+#define DMA_ADDR_INVALID			(~(dma_addr_t)0)
+
+#define UVC_NUM_REQUESTS			4
+#define UVC_MAX_REQUEST_SIZE			64
+#define UVC_MAX_EVENTS				4
+
+/* ------------------------------------------------------------------------
+ * Structures
+ */
+
+struct uvc_video
+{
+	struct usb_ep *ep;
+
+	/* Frame parameters */
+	u8 bpp;
+	u32 fcc;
+	unsigned int width;
+	unsigned int height;
+	unsigned int imagesize;
+
+	/* Requests */
+	unsigned int req_size;
+	struct usb_request *req[UVC_NUM_REQUESTS];
+	__u8 *req_buffer[UVC_NUM_REQUESTS];
+	struct list_head req_free;
+	spinlock_t req_lock;
+
+	void (*encode) (struct usb_request *req, struct uvc_video *video,
+			struct uvc_buffer *buf);
+
+	/* Context data used by the completion handler */
+	__u32 payload_size;
+	__u32 max_payload_size;
+
+	struct uvc_video_queue queue;
+	unsigned int fid;
+};
+
+enum uvc_state
+{
+	UVC_STATE_DISCONNECTED,
+	UVC_STATE_CONNECTED,
+	UVC_STATE_STREAMING,
+};
+
+struct uvc_device
+{
+	struct video_device *vdev;
+	enum uvc_state state;
+	struct usb_function func;
+	struct uvc_video video;
+
+	/* Descriptors */
+	struct {
+		const struct uvc_descriptor_header * const *control;
+		const struct uvc_descriptor_header * const *fs_streaming;
+		const struct uvc_descriptor_header * const *hs_streaming;
+	} desc;
+
+	unsigned int control_intf;
+	struct usb_ep *control_ep;
+	struct usb_request *control_req;
+	void *control_buf;
+
+	unsigned int streaming_intf;
+
+	/* Events */
+	unsigned int event_length;
+	unsigned int event_setup_out : 1;
+};
+
+static inline struct uvc_device *to_uvc(struct usb_function *f)
+{
+	return container_of(f, struct uvc_device, func);
+}
+
+struct uvc_file_handle
+{
+	struct v4l2_fh vfh;
+	struct uvc_video *device;
+};
+
+#define to_uvc_file_handle(handle) \
+	container_of(handle, struct uvc_file_handle, vfh)
+
+/* ------------------------------------------------------------------------
+ * Functions
+ */
+
+extern void uvc_endpoint_stream(struct uvc_device *dev);
+
+extern void uvc_function_connect(struct uvc_device *uvc);
+extern void uvc_function_disconnect(struct uvc_device *uvc);
+
+#endif /* __KERNEL__ */
+
+#endif /* _UVC_GADGET_H_ */
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc_queue.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc_queue.c
new file mode 100644
index 0000000..0cdf89d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc_queue.c
@@ -0,0 +1,585 @@
+/*
+ *	uvc_queue.c  --  USB Video Class driver - Buffers management
+ *
+ *	Copyright (C) 2005-2010
+ *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/videodev2.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/atomic.h>
+
+#include "uvc.h"
+
+/* ------------------------------------------------------------------------
+ * Video buffers queue management.
+ *
+ * The video queue is initialized by uvc_queue_init(). The function performs
+ * basic initialization of the uvc_video_queue struct and never fails.
+ *
+ * Video buffer allocation and freeing are performed by uvc_alloc_buffers and
+ * uvc_free_buffers respectively. The former acquires the video queue lock,
+ * while the latter must be called with the lock held (so that allocation can
+ * free previously allocated buffers). Trying to free buffers that are mapped
+ * to user space will return -EBUSY.
+ *
+ * Video buffers are managed using two queues. However, unlike most USB video
+ * drivers that use an in queue and an out queue, we use a main queue to hold
+ * all queued buffers (both 'empty' and 'done' buffers), and an irq queue to
+ * hold empty buffers. This design (copied from video-buf) minimizes locking
+ * in interrupt, as only one queue is shared between interrupt and user
+ * contexts.
+ *
+ * Use cases
+ * ---------
+ *
+ * Unless stated otherwise, all operations that modify the irq buffers queue
+ * are protected by the irq spinlock.
+ *
+ * 1. The user queues the buffers, starts streaming and dequeues a buffer.
+ *
+ *    The buffers are added to the main and irq queues. Both operations are
+ *    protected by the queue lock, and the latter is protected by the irq
+ *    spinlock as well.
+ *
+ *    The completion handler fetches a buffer from the irq queue and fills it
+ *    with video data. If no buffer is available (irq queue empty), the handler
+ *    returns immediately.
+ *
+ *    When the buffer is full, the completion handler removes it from the irq
+ *    queue, marks it as ready (UVC_BUF_STATE_DONE) and wakes its wait queue.
+ *    At that point, any process waiting on the buffer will be woken up. If a
+ *    process tries to dequeue a buffer after it has been marked ready, the
+ *    dequeuing will succeed immediately.
+ *
+ * 2. Buffers are queued, user is waiting on a buffer and the device gets
+ *    disconnected.
+ *
+ *    When the device is disconnected, the kernel calls the completion handler
+ *    with an appropriate status code. The handler marks all buffers in the
+ *    irq queue as being erroneous (UVC_BUF_STATE_ERROR) and wakes them up so
+ *    that any process waiting on a buffer gets woken up.
+ *
+ *    Waking up the first buffer on the irq list is not enough, as the
+ *    process waiting on the buffer might restart the dequeue operation
+ *    immediately.
+ *
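+ * Typical call sequence, as an illustrative sketch only ("nbuffers",
+ * "imagesize" and "v4l2_buf" are placeholders, error handling omitted):
+ *
+ *	uvc_queue_init(&queue, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ *	uvc_alloc_buffers(&queue, nbuffers, imagesize);
+ *	uvc_queue_enable(&queue, 1);
+ *	uvc_queue_buffer(&queue, &v4l2_buf);		(user context)
+ *	buf = uvc_queue_head(&queue);			(interrupt, irqlock held)
+ *	uvc_queue_next_buffer(&queue, buf);		(buffer completed)
+ *	uvc_dequeue_buffer(&queue, &v4l2_buf, 0);	(user context)
+ *	uvc_queue_enable(&queue, 0);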
+ */
+
+static void
+uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
+{
+	mutex_init(&queue->mutex);
+	spin_lock_init(&queue->irqlock);
+	INIT_LIST_HEAD(&queue->mainqueue);
+	INIT_LIST_HEAD(&queue->irqqueue);
+	queue->type = type;
+}
+
+/*
+ * Free the video buffers.
+ *
+ * This function must be called with the queue lock held.
+ */
+static int uvc_free_buffers(struct uvc_video_queue *queue)
+{
+	unsigned int i;
+
+	for (i = 0; i < queue->count; ++i) {
+		if (queue->buffer[i].vma_use_count != 0)
+			return -EBUSY;
+	}
+
+	if (queue->count) {
+		vfree(queue->mem);
+		queue->count = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate the video buffers.
+ *
+ * Pages are reserved to make sure they will not be swapped, as they will be
+ * filled in the URB completion handler.
+ *
+ * Buffers will be individually mapped, so they must all be page aligned.
+ */
+static int
+uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
+		  unsigned int buflength)
+{
+	unsigned int bufsize = PAGE_ALIGN(buflength);
+	unsigned int i;
+	void *mem = NULL;
+	int ret;
+
+	if (nbuffers > UVC_MAX_VIDEO_BUFFERS)
+		nbuffers = UVC_MAX_VIDEO_BUFFERS;
+
+	mutex_lock(&queue->mutex);
+
+	if ((ret = uvc_free_buffers(queue)) < 0)
+		goto done;
+
+	/* Bail out if no buffers should be allocated. */
+	if (nbuffers == 0)
+		goto done;
+
+	/* Decrement the number of buffers until allocation succeeds. */
+	for (; nbuffers > 0; --nbuffers) {
+		mem = vmalloc_32(nbuffers * bufsize);
+		if (mem != NULL)
+			break;
+	}
+
+	if (mem == NULL) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	for (i = 0; i < nbuffers; ++i) {
+		memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);
+		queue->buffer[i].buf.index = i;
+		queue->buffer[i].buf.m.offset = i * bufsize;
+		queue->buffer[i].buf.length = buflength;
+		queue->buffer[i].buf.type = queue->type;
+		queue->buffer[i].buf.sequence = 0;
+		queue->buffer[i].buf.field = V4L2_FIELD_NONE;
+		queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
+		queue->buffer[i].buf.flags = 0;
+		init_waitqueue_head(&queue->buffer[i].wait);
+	}
+
+	queue->mem = mem;
+	queue->count = nbuffers;
+	queue->buf_size = bufsize;
+	ret = nbuffers;
+
+done:
+	mutex_unlock(&queue->mutex);
+	return ret;
+}
+
+static void __uvc_query_buffer(struct uvc_buffer *buf,
+		struct v4l2_buffer *v4l2_buf)
+{
+	memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf);
+
+	if (buf->vma_use_count)
+		v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED;
+
+	switch (buf->state) {
+	case UVC_BUF_STATE_ERROR:
+	case UVC_BUF_STATE_DONE:
+		v4l2_buf->flags |= V4L2_BUF_FLAG_DONE;
+		break;
+	case UVC_BUF_STATE_QUEUED:
+	case UVC_BUF_STATE_ACTIVE:
+		v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
+		break;
+	case UVC_BUF_STATE_IDLE:
+	default:
+		break;
+	}
+}
+
+static int
+uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf)
+{
+	int ret = 0;
+
+	mutex_lock(&queue->mutex);
+	if (v4l2_buf->index >= queue->count) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	__uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf);
+
+done:
+	mutex_unlock(&queue->mutex);
+	return ret;
+}
+
+/*
+ * Queue a video buffer. Attempting to queue a buffer that has already been
+ * queued will return -EINVAL.
+ */
+static int
+uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf)
+{
+	struct uvc_buffer *buf;
+	unsigned long flags;
+	int ret = 0;
+
+	uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);
+
+	if (v4l2_buf->type != queue->type ||
+	    v4l2_buf->memory != V4L2_MEMORY_MMAP) {
+		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
+			"and/or memory (%u).\n", v4l2_buf->type,
+			v4l2_buf->memory);
+		return -EINVAL;
+	}
+
+	mutex_lock(&queue->mutex);
+	if (v4l2_buf->index >= queue->count) {
+		uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	buf = &queue->buffer[v4l2_buf->index];
+	if (buf->state != UVC_BUF_STATE_IDLE) {
+		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
+			"(%u).\n", buf->state);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+	    v4l2_buf->bytesused > buf->buf.length) {
+		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		buf->buf.bytesused = 0;
+	else
+		buf->buf.bytesused = v4l2_buf->bytesused;
+
+	spin_lock_irqsave(&queue->irqlock, flags);
+	if (queue->flags & UVC_QUEUE_DISCONNECTED) {
+		spin_unlock_irqrestore(&queue->irqlock, flags);
+		ret = -ENODEV;
+		goto done;
+	}
+	buf->state = UVC_BUF_STATE_QUEUED;
+
+	ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
+	queue->flags &= ~UVC_QUEUE_PAUSED;
+
+	list_add_tail(&buf->stream, &queue->mainqueue);
+	list_add_tail(&buf->queue, &queue->irqqueue);
+	spin_unlock_irqrestore(&queue->irqlock, flags);
+
+done:
+	mutex_unlock(&queue->mutex);
+	return ret;
+}
+
+static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
+{
+	if (nonblocking) {
+		return (buf->state != UVC_BUF_STATE_QUEUED &&
+			buf->state != UVC_BUF_STATE_ACTIVE)
+			? 0 : -EAGAIN;
+	}
+
+	return wait_event_interruptible(buf->wait,
+		buf->state != UVC_BUF_STATE_QUEUED &&
+		buf->state != UVC_BUF_STATE_ACTIVE);
+}
+
+/*
+ * Dequeue a video buffer. If nonblocking is false, block until a buffer is
+ * available.
+ */
+static int
+uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf,
+		   int nonblocking)
+{
+	struct uvc_buffer *buf;
+	int ret = 0;
+
+	if (v4l2_buf->type != queue->type ||
+	    v4l2_buf->memory != V4L2_MEMORY_MMAP) {
+		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
+			"and/or memory (%u).\n", v4l2_buf->type,
+			v4l2_buf->memory);
+		return -EINVAL;
+	}
+
+	mutex_lock(&queue->mutex);
+	if (list_empty(&queue->mainqueue)) {
+		uvc_trace(UVC_TRACE_CAPTURE, "[E] Empty buffer queue.\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
+	if ((ret = uvc_queue_waiton(buf, nonblocking)) < 0)
+		goto done;
+
+	uvc_trace(UVC_TRACE_CAPTURE, "Dequeuing buffer %u (%u, %u bytes).\n",
+		buf->buf.index, buf->state, buf->buf.bytesused);
+
+	switch (buf->state) {
+	case UVC_BUF_STATE_ERROR:
+		uvc_trace(UVC_TRACE_CAPTURE, "[W] Corrupted data "
+			"(transmission error).\n");
+		ret = -EIO;
+	case UVC_BUF_STATE_DONE:
+		buf->state = UVC_BUF_STATE_IDLE;
+		break;
+
+	case UVC_BUF_STATE_IDLE:
+	case UVC_BUF_STATE_QUEUED:
+	case UVC_BUF_STATE_ACTIVE:
+	default:
+		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
+			"(driver bug?).\n", buf->state);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	list_del(&buf->stream);
+	__uvc_query_buffer(buf, v4l2_buf);
+
+done:
+	mutex_unlock(&queue->mutex);
+	return ret;
+}
+
+/*
+ * Poll the video queue.
+ *
+ * This function implements video queue polling and is intended to be used by
+ * the device poll handler.
+ */
+static unsigned int
+uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
+	       poll_table *wait)
+{
+	struct uvc_buffer *buf;
+	unsigned int mask = 0;
+
+	mutex_lock(&queue->mutex);
+	if (list_empty(&queue->mainqueue))
+		goto done;
+
+	buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
+
+	poll_wait(file, &buf->wait, wait);
+	if (buf->state == UVC_BUF_STATE_DONE ||
+	    buf->state == UVC_BUF_STATE_ERROR)
+		mask |= POLLOUT | POLLWRNORM;
+
+done:
+	mutex_unlock(&queue->mutex);
+	return mask;
+}
+
+/*
+ * VMA operations.
+ */
+static void uvc_vm_open(struct vm_area_struct *vma)
+{
+	struct uvc_buffer *buffer = vma->vm_private_data;
+	buffer->vma_use_count++;
+}
+
+static void uvc_vm_close(struct vm_area_struct *vma)
+{
+	struct uvc_buffer *buffer = vma->vm_private_data;
+	buffer->vma_use_count--;
+}
+
+static struct vm_operations_struct uvc_vm_ops = {
+	.open		= uvc_vm_open,
+	.close		= uvc_vm_close,
+};
+
+/*
+ * Memory-map a buffer.
+ *
+ * This function implements video buffer memory mapping and is intended to be
+ * used by the device mmap handler.
+ */
+static int
+uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
+{
+	struct uvc_buffer *uninitialized_var(buffer);
+	struct page *page;
+	unsigned long addr, start, size;
+	unsigned int i;
+	int ret = 0;
+
+	start = vma->vm_start;
+	size = vma->vm_end - vma->vm_start;
+
+	mutex_lock(&queue->mutex);
+
+	for (i = 0; i < queue->count; ++i) {
+		buffer = &queue->buffer[i];
+		if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
+			break;
+	}
+
+	if (i == queue->count || size != queue->buf_size) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/*
+	 * VM_IO marks the area as being an mmaped region for I/O to a
+	 * device. It also prevents the region from being core dumped.
+	 */
+	vma->vm_flags |= VM_IO;
+
+	addr = (unsigned long)queue->mem + buffer->buf.m.offset;
+	while (size > 0) {
+		page = vmalloc_to_page((void *)addr);
+		if ((ret = vm_insert_page(vma, start, page)) < 0)
+			goto done;
+
+		start += PAGE_SIZE;
+		addr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+
+	vma->vm_ops = &uvc_vm_ops;
+	vma->vm_private_data = buffer;
+	uvc_vm_open(vma);
+
+done:
+	mutex_unlock(&queue->mutex);
+	return ret;
+}
+
+/*
+ * Cancel the video buffers queue.
+ *
+ * Cancelling the queue marks all buffers on the irq queue as erroneous,
+ * wakes them up and removes them from the queue.
+ *
+ * If the disconnect parameter is set, further calls to uvc_queue_buffer will
+ * fail with -ENODEV.
+ *
+ * This function acquires the irq spinlock and can be called from interrupt
+ * context.
+ */
+static void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
+{
+	struct uvc_buffer *buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->irqlock, flags);
+	while (!list_empty(&queue->irqqueue)) {
+		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
+				       queue);
+		list_del(&buf->queue);
+		buf->state = UVC_BUF_STATE_ERROR;
+		wake_up(&buf->wait);
+	}
+	/* This must be protected by the irqlock spinlock to avoid race
+	 * conditions between uvc_queue_buffer and the disconnection event that
+	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
+	 * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
+	 * state outside the queue code.
+	 */
+	if (disconnect)
+		queue->flags |= UVC_QUEUE_DISCONNECTED;
+	spin_unlock_irqrestore(&queue->irqlock, flags);
+}
+
+/*
+ * Enable or disable the video buffers queue.
+ *
+ * The queue must be enabled before starting video acquisition and must be
+ * disabled after stopping it. This ensures that the video buffers queue
+ * state can be properly initialized before buffers are accessed from the
+ * interrupt handler.
+ *
+ * Enabling the video queue initializes parameters (such as sequence number,
+ * sync pattern, ...). If the queue is already enabled, return -EBUSY.
+ *
+ * Disabling the video queue cancels the queue and removes all buffers from
+ * the main queue.
+ *
+ * This function can't be called from interrupt context. Use
+ * uvc_queue_cancel() instead.
+ */
+static int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
+{
+	unsigned int i;
+	int ret = 0;
+
+	mutex_lock(&queue->mutex);
+	if (enable) {
+		if (uvc_queue_streaming(queue)) {
+			ret = -EBUSY;
+			goto done;
+		}
+		queue->sequence = 0;
+		queue->flags |= UVC_QUEUE_STREAMING;
+		queue->buf_used = 0;
+	} else {
+		uvc_queue_cancel(queue, 0);
+		INIT_LIST_HEAD(&queue->mainqueue);
+
+		for (i = 0; i < queue->count; ++i)
+			queue->buffer[i].state = UVC_BUF_STATE_IDLE;
+
+		queue->flags &= ~UVC_QUEUE_STREAMING;
+	}
+
+done:
+	mutex_unlock(&queue->mutex);
+	return ret;
+}
+
+/* called with queue->irqlock held. */
+static struct uvc_buffer *
+uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf)
+{
+	struct uvc_buffer *nextbuf;
+
+	if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
+	    buf->buf.length != buf->buf.bytesused) {
+		buf->state = UVC_BUF_STATE_QUEUED;
+		buf->buf.bytesused = 0;
+		return buf;
+	}
+
+	list_del(&buf->queue);
+	if (!list_empty(&queue->irqqueue))
+		nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
+					   queue);
+	else
+		nextbuf = NULL;
+
+	buf->buf.sequence = queue->sequence++;
+	do_gettimeofday(&buf->buf.timestamp);
+
+	wake_up(&buf->wait);
+	return nextbuf;
+}
+
+static struct uvc_buffer *uvc_queue_head(struct uvc_video_queue *queue)
+{
+	struct uvc_buffer *buf = NULL;
+
+	if (!list_empty(&queue->irqqueue))
+		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
+				       queue);
+	else
+		queue->flags |= UVC_QUEUE_PAUSED;
+
+	return buf;
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc_queue.h b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc_queue.h
new file mode 100644
index 0000000..1812a8e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc_queue.h
@@ -0,0 +1,69 @@
+#ifndef _UVC_QUEUE_H_
+#define _UVC_QUEUE_H_
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/videodev2.h>
+
+/* Maximum frame size in bytes, for sanity checking. */
+#define UVC_MAX_FRAME_SIZE	(16*1024*1024)
+/* Maximum number of video buffers. */
+#define UVC_MAX_VIDEO_BUFFERS	32
+
+/* ------------------------------------------------------------------------
+ * Structures.
+ */
+
+enum uvc_buffer_state {
+	UVC_BUF_STATE_IDLE	= 0,
+	UVC_BUF_STATE_QUEUED	= 1,
+	UVC_BUF_STATE_ACTIVE	= 2,
+	UVC_BUF_STATE_DONE	= 3,
+	UVC_BUF_STATE_ERROR	= 4,
+};
+
+struct uvc_buffer {
+	unsigned long vma_use_count;
+	struct list_head stream;
+
+	/* Touched by interrupt handler. */
+	struct v4l2_buffer buf;
+	struct list_head queue;
+	wait_queue_head_t wait;
+	enum uvc_buffer_state state;
+};
+
+#define UVC_QUEUE_STREAMING		(1 << 0)
+#define UVC_QUEUE_DISCONNECTED		(1 << 1)
+#define UVC_QUEUE_DROP_INCOMPLETE	(1 << 2)
+#define UVC_QUEUE_PAUSED		(1 << 3)
+
+struct uvc_video_queue {
+	enum v4l2_buf_type type;
+
+	void *mem;
+	unsigned int flags;
+	__u32 sequence;
+
+	unsigned int count;
+	unsigned int buf_size;
+	unsigned int buf_used;
+	struct uvc_buffer buffer[UVC_MAX_VIDEO_BUFFERS];
+	struct mutex mutex;	/* protects buffers and mainqueue */
+	spinlock_t irqlock;	/* protects irqqueue */
+
+	struct list_head mainqueue;
+	struct list_head irqqueue;
+};
+
+static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
+{
+	return queue->flags & UVC_QUEUE_STREAMING;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _UVC_QUEUE_H_ */
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc_v4l2.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc_v4l2.c
new file mode 100644
index 0000000..54d7ca5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc_v4l2.c
@@ -0,0 +1,356 @@
+/*
+ *	uvc_v4l2.c  --  USB Video Class Gadget driver
+ *
+ *	Copyright (C) 2009-2010
+ *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "uvc.h"
+#include "uvc_queue.h"
+
+/* --------------------------------------------------------------------------
+ * Requests handling
+ */
+
+static int
+uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
+{
+	struct usb_composite_dev *cdev = uvc->func.config->cdev;
+	struct usb_request *req = uvc->control_req;
+
+	if (data->length < 0)
+		return usb_ep_set_halt(cdev->gadget->ep0);
+
+	req->length = min_t(unsigned int, uvc->event_length, data->length);
+	req->zero = data->length < uvc->event_length;
+	req->dma = DMA_ADDR_INVALID;
+
+	memcpy(req->buf, data->data, data->length);
+
+	return usb_ep_queue(cdev->gadget->ep0, req, GFP_KERNEL);
+}
+
+/* --------------------------------------------------------------------------
+ * V4L2
+ */
+
+struct uvc_format
+{
+	u8 bpp;
+	u32 fcc;
+};
+
+static struct uvc_format uvc_formats[] = {
+	{ 16, V4L2_PIX_FMT_YUYV  },
+	{ 0,  V4L2_PIX_FMT_MJPEG },
+};
+
+static int
+uvc_v4l2_get_format(struct uvc_video *video, struct v4l2_format *fmt)
+{
+	fmt->fmt.pix.pixelformat = video->fcc;
+	fmt->fmt.pix.width = video->width;
+	fmt->fmt.pix.height = video->height;
+	fmt->fmt.pix.field = V4L2_FIELD_NONE;
+	fmt->fmt.pix.bytesperline = video->bpp * video->width / 8;
+	fmt->fmt.pix.sizeimage = video->imagesize;
+	fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+	fmt->fmt.pix.priv = 0;
+
+	return 0;
+}
+
+static int
+uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt)
+{
+	struct uvc_format *format;
+	unsigned int imagesize;
+	unsigned int bpl;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(uvc_formats); ++i) {
+		format = &uvc_formats[i];
+		if (format->fcc == fmt->fmt.pix.pixelformat)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(uvc_formats)) {
+		printk(KERN_INFO "Unsupported format 0x%08x.\n",
+			fmt->fmt.pix.pixelformat);
+		return -EINVAL;
+	}
+
+	bpl = format->bpp * fmt->fmt.pix.width / 8;
+	imagesize = bpl ? bpl * fmt->fmt.pix.height : fmt->fmt.pix.sizeimage;
+
+	video->fcc = format->fcc;
+	video->bpp = format->bpp;
+	video->width = fmt->fmt.pix.width;
+	video->height = fmt->fmt.pix.height;
+	video->imagesize = imagesize;
+
+	fmt->fmt.pix.field = V4L2_FIELD_NONE;
+	fmt->fmt.pix.bytesperline = bpl;
+	fmt->fmt.pix.sizeimage = imagesize;
+	fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+	fmt->fmt.pix.priv = 0;
+
+	return 0;
+}
+
+static int
+uvc_v4l2_open(struct file *file)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct uvc_device *uvc = video_get_drvdata(vdev);
+	struct uvc_file_handle *handle;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (handle == NULL)
+		return -ENOMEM;
+
+	v4l2_fh_init(&handle->vfh, vdev);
+	v4l2_fh_add(&handle->vfh);
+
+	handle->device = &uvc->video;
+	file->private_data = &handle->vfh;
+
+	uvc_function_connect(uvc);
+	return 0;
+}
+
+static int
+uvc_v4l2_release(struct file *file)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct uvc_device *uvc = video_get_drvdata(vdev);
+	struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
+	struct uvc_video *video = handle->device;
+
+	uvc_function_disconnect(uvc);
+
+	uvc_video_enable(video, 0);
+	mutex_lock(&video->queue.mutex);
+	if (uvc_free_buffers(&video->queue) < 0)
+		printk(KERN_ERR "uvc_v4l2_release: Unable to free "
+				"buffers.\n");
+	mutex_unlock(&video->queue.mutex);
+
+	file->private_data = NULL;
+	v4l2_fh_del(&handle->vfh);
+	v4l2_fh_exit(&handle->vfh);
+	kfree(handle);
+	return 0;
+}
+
+static long
+uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct uvc_device *uvc = video_get_drvdata(vdev);
+	struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
+	struct usb_composite_dev *cdev = uvc->func.config->cdev;
+	struct uvc_video *video = &uvc->video;
+	int ret = 0;
+
+	switch (cmd) {
+	/* Query capabilities */
+	case VIDIOC_QUERYCAP:
+	{
+		struct v4l2_capability *cap = arg;
+
+		memset(cap, 0, sizeof *cap);
+		strncpy(cap->driver, "g_uvc", sizeof(cap->driver));
+		strncpy(cap->card, cdev->gadget->name, sizeof(cap->card));
+		strncpy(cap->bus_info, dev_name(&cdev->gadget->dev),
+			sizeof cap->bus_info);
+		cap->version = DRIVER_VERSION_NUMBER;
+		cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+		break;
+	}
+
+	/* Get & Set format */
+	case VIDIOC_G_FMT:
+	{
+		struct v4l2_format *fmt = arg;
+
+		if (fmt->type != video->queue.type)
+			return -EINVAL;
+
+		return uvc_v4l2_get_format(video, fmt);
+	}
+
+	case VIDIOC_S_FMT:
+	{
+		struct v4l2_format *fmt = arg;
+
+		if (fmt->type != video->queue.type)
+			return -EINVAL;
+
+		return uvc_v4l2_set_format(video, fmt);
+	}
+
+	/* Buffers & streaming */
+	case VIDIOC_REQBUFS:
+	{
+		struct v4l2_requestbuffers *rb = arg;
+
+		if (rb->type != video->queue.type ||
+		    rb->memory != V4L2_MEMORY_MMAP)
+			return -EINVAL;
+
+		ret = uvc_alloc_buffers(&video->queue, rb->count,
+					video->imagesize);
+		if (ret < 0)
+			return ret;
+
+		rb->count = ret;
+		ret = 0;
+		break;
+	}
+
+	case VIDIOC_QUERYBUF:
+	{
+		struct v4l2_buffer *buf = arg;
+
+		if (buf->type != video->queue.type)
+			return -EINVAL;
+
+		return uvc_query_buffer(&video->queue, buf);
+	}
+
+	case VIDIOC_QBUF:
+		if ((ret = uvc_queue_buffer(&video->queue, arg)) < 0)
+			return ret;
+
+		return uvc_video_pump(video);
+
+	case VIDIOC_DQBUF:
+		return uvc_dequeue_buffer(&video->queue, arg,
+			file->f_flags & O_NONBLOCK);
+
+	case VIDIOC_STREAMON:
+	{
+		int *type = arg;
+
+		if (*type != video->queue.type)
+			return -EINVAL;
+
+		return uvc_video_enable(video, 1);
+	}
+
+	case VIDIOC_STREAMOFF:
+	{
+		int *type = arg;
+
+		if (*type != video->queue.type)
+			return -EINVAL;
+
+		return uvc_video_enable(video, 0);
+	}
+
+	/* Events */
+        case VIDIOC_DQEVENT:
+	{
+		struct v4l2_event *event = arg;
+
+		ret = v4l2_event_dequeue(&handle->vfh, event,
+					 file->f_flags & O_NONBLOCK);
+		if (ret == 0 && event->type == UVC_EVENT_SETUP) {
+			struct uvc_event *uvc_event = (void *)&event->u.data;
+
+			/* Tell the complete callback to generate an event for
+			 * the next request that will be enqueued by
+			 * uvc_event_write.
+			 */
+			uvc->event_setup_out =
+				!(uvc_event->req.bRequestType & USB_DIR_IN);
+			uvc->event_length = uvc_event->req.wLength;
+		}
+
+		return ret;
+	}
+
+	case VIDIOC_SUBSCRIBE_EVENT:
+	{
+		struct v4l2_event_subscription *sub = arg;
+
+		if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
+			return -EINVAL;
+
+		return v4l2_event_subscribe(&handle->vfh, arg, 2);
+	}
+
+	case VIDIOC_UNSUBSCRIBE_EVENT:
+		return v4l2_event_unsubscribe(&handle->vfh, arg);
+
+	case UVCIOC_SEND_RESPONSE:
+		ret = uvc_send_response(uvc, arg);
+		break;
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	return ret;
+}
+
+static long
+uvc_v4l2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	return video_usercopy(file, cmd, arg, uvc_v4l2_do_ioctl);
+}
+
+static int
+uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct uvc_device *uvc = video_get_drvdata(vdev);
+
+	return uvc_queue_mmap(&uvc->video.queue, vma);
+}
+
+static unsigned int
+uvc_v4l2_poll(struct file *file, poll_table *wait)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct uvc_device *uvc = video_get_drvdata(vdev);
+	struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
+	unsigned int mask = 0;
+
+	poll_wait(file, &handle->vfh.wait, wait);
+	if (v4l2_event_pending(&handle->vfh))
+		mask |= POLLPRI;
+
+	mask |= uvc_queue_poll(&uvc->video.queue, file, wait);
+
+	return mask;
+}
+
+static struct v4l2_file_operations uvc_v4l2_fops = {
+	.owner		= THIS_MODULE,
+	.open		= uvc_v4l2_open,
+	.release	= uvc_v4l2_release,
+	.ioctl		= uvc_v4l2_ioctl,
+	.mmap		= uvc_v4l2_mmap,
+	.poll		= uvc_v4l2_poll,
+};
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc_video.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc_video.c
new file mode 100644
index 0000000..b0e53a8
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/uvc_video.c
@@ -0,0 +1,385 @@
+/*
+ *	uvc_video.c  --  USB Video Class Gadget driver
+ *
+ *	Copyright (C) 2009-2010
+ *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include <media/v4l2-dev.h>
+
+#include "uvc.h"
+#include "uvc_queue.h"
+
+/* --------------------------------------------------------------------------
+ * Video codecs
+ */
+
+static int
+uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
+		u8 *data, int len)
+{
+	data[0] = 2;
+	data[1] = UVC_STREAM_EOH | video->fid;
+
+	if (buf->buf.bytesused - video->queue.buf_used <= len - 2)
+		data[1] |= UVC_STREAM_EOF;
+
+	return 2;
+}
+
+static int
+uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
+		u8 *data, int len)
+{
+	struct uvc_video_queue *queue = &video->queue;
+	unsigned int nbytes;
+	void *mem;
+
+	/* Copy video data to the USB buffer. */
+	mem = queue->mem + buf->buf.m.offset + queue->buf_used;
+	nbytes = min((unsigned int)len, buf->buf.bytesused - queue->buf_used);
+
+	memcpy(data, mem, nbytes);
+	queue->buf_used += nbytes;
+
+	return nbytes;
+}
+
+static void
+uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
+		struct uvc_buffer *buf)
+{
+	void *mem = req->buf;
+	int len = video->req_size;
+	int ret;
+
+	/* Add a header at the beginning of the payload. */
+	if (video->payload_size == 0) {
+		ret = uvc_video_encode_header(video, buf, mem, len);
+		video->payload_size += ret;
+		mem += ret;
+		len -= ret;
+	}
+
+	/* Process video data. */
+	len = min((int)(video->max_payload_size - video->payload_size), len);
+	ret = uvc_video_encode_data(video, buf, mem, len);
+
+	video->payload_size += ret;
+	len -= ret;
+
+	req->length = video->req_size - len;
+	req->zero = video->payload_size == video->max_payload_size;
+
+	if (buf->buf.bytesused == video->queue.buf_used) {
+		video->queue.buf_used = 0;
+		buf->state = UVC_BUF_STATE_DONE;
+		uvc_queue_next_buffer(&video->queue, buf);
+		video->fid ^= UVC_STREAM_FID;
+
+		video->payload_size = 0;
+	}
+
+	if (video->payload_size == video->max_payload_size ||
+	    buf->buf.bytesused == video->queue.buf_used)
+		video->payload_size = 0;
+}
+
+static void
+uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
+		struct uvc_buffer *buf)
+{
+	void *mem = req->buf;
+	int len = video->req_size;
+	int ret;
+
+	/* Add the header. */
+	ret = uvc_video_encode_header(video, buf, mem, len);
+	mem += ret;
+	len -= ret;
+
+	/* Process video data. */
+	ret = uvc_video_encode_data(video, buf, mem, len);
+	len -= ret;
+
+	req->length = video->req_size - len;
+
+	if (buf->buf.bytesused == video->queue.buf_used) {
+		video->queue.buf_used = 0;
+		buf->state = UVC_BUF_STATE_DONE;
+		uvc_queue_next_buffer(&video->queue, buf);
+		video->fid ^= UVC_STREAM_FID;
+	}
+}
+
+/* --------------------------------------------------------------------------
+ * Request handling
+ */
+
+/*
+ * I somehow feel that synchronisation won't be easy to achieve here. We have
+ * three events that control USB requests submission:
+ *
+ * - USB request completion: the completion handler will resubmit the request
+ *   if a video buffer is available.
+ *
+ * - USB interface setting selection: in response to a SET_INTERFACE request,
+ *   the handler will start streaming if a video buffer is available and if
+ *   video is not currently streaming.
+ *
+ * - V4L2 buffer queueing: the driver will start streaming if video is not
+ *   currently streaming.
+ *
+ * Race conditions between those 3 events might lead to deadlocks or other
+ * nasty side effects.
+ *
+ * The "video currently streaming" condition can't be detected by the irqqueue
+ * being empty, as a request can still be in flight. A separate "queue paused"
+ * flag is thus needed.
+ *
+ * The paused flag will be set when we try to retrieve the irqqueue head if the
+ * queue is empty, and cleared when we queue a buffer.
+ *
+ * The USB request completion handler will get the buffer at the irqqueue head
+ * under protection of the queue spinlock. If the queue is empty, the streaming
+ * paused flag will be set. Right after releasing the spinlock a userspace
+ * application can queue a buffer. The flag will then be cleared, and the ioctl
+ * handler will restart the video stream.
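+ *
+ * A rough sketch of that handshake (illustrative only):
+ *
+ *	completion handler			VIDIOC_QBUF handler
+ *	------------------			-------------------
+ *	spin_lock_irqsave(&queue->irqlock, flags);
+ *	buf = uvc_queue_head(&queue);
+ *	   (queue empty: PAUSED flag set)
+ *	spin_unlock_irqrestore(&queue->irqlock, flags);
+ *						uvc_queue_buffer(...);
+ *						   (clears PAUSED, returns 1)
+ *						uvc_video_pump(video);
+ *						   (restarts the stream)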
+ */
+static void
+uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct uvc_video *video = req->context;
+	struct uvc_buffer *buf;
+	unsigned long flags;
+	int ret;
+
+	switch (req->status) {
+	case 0:
+		break;
+
+	case -ESHUTDOWN:
+		printk(KERN_INFO "VS request cancelled.\n");
+		goto requeue;
+
+	default:
+		printk(KERN_INFO "VS request completed with status %d.\n",
+			req->status);
+		goto requeue;
+	}
+
+	spin_lock_irqsave(&video->queue.irqlock, flags);
+	buf = uvc_queue_head(&video->queue);
+	if (buf == NULL) {
+		spin_unlock_irqrestore(&video->queue.irqlock, flags);
+		goto requeue;
+	}
+
+	video->encode(req, video, buf);
+
+	if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) {
+		printk(KERN_INFO "Failed to queue request (%d).\n", ret);
+		usb_ep_set_halt(ep);
+		spin_unlock_irqrestore(&video->queue.irqlock, flags);
+		goto requeue;
+	}
+	spin_unlock_irqrestore(&video->queue.irqlock, flags);
+
+	return;
+
+requeue:
+	spin_lock_irqsave(&video->req_lock, flags);
+	list_add_tail(&req->list, &video->req_free);
+	spin_unlock_irqrestore(&video->req_lock, flags);
+}
+
+static int
+uvc_video_free_requests(struct uvc_video *video)
+{
+	unsigned int i;
+
+	for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
+		if (video->req[i]) {
+			usb_ep_free_request(video->ep, video->req[i]);
+			video->req[i] = NULL;
+		}
+
+		if (video->req_buffer[i]) {
+			kfree(video->req_buffer[i]);
+			video->req_buffer[i] = NULL;
+		}
+	}
+
+	INIT_LIST_HEAD(&video->req_free);
+	video->req_size = 0;
+	return 0;
+}
+
+static int
+uvc_video_alloc_requests(struct uvc_video *video)
+{
+	unsigned int i;
+	int ret = -ENOMEM;
+
+	BUG_ON(video->req_size);
+
+	for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
+		video->req_buffer[i] = kmalloc(video->ep->maxpacket, GFP_KERNEL);
+		if (video->req_buffer[i] == NULL)
+			goto error;
+
+		video->req[i] = usb_ep_alloc_request(video->ep, GFP_KERNEL);
+		if (video->req[i] == NULL)
+			goto error;
+
+		video->req[i]->buf = video->req_buffer[i];
+		video->req[i]->length = 0;
+		video->req[i]->dma = DMA_ADDR_INVALID;
+		video->req[i]->complete = uvc_video_complete;
+		video->req[i]->context = video;
+
+		list_add_tail(&video->req[i]->list, &video->req_free);
+	}
+
+	video->req_size = video->ep->maxpacket;
+	return 0;
+
+error:
+	uvc_video_free_requests(video);
+	return ret;
+}
+
+/* --------------------------------------------------------------------------
+ * Video streaming
+ */
+
+/*
+ * uvc_video_pump - Pump video data into the USB requests
+ *
+ * This function fills the available USB requests (listed in req_free) with
+ * video data from the queued buffers.
+ */
+static int
+uvc_video_pump(struct uvc_video *video)
+{
+	struct usb_request *req;
+	struct uvc_buffer *buf;
+	unsigned long flags;
+	int ret;
+
+	/* FIXME TODO Race between uvc_video_pump and requests completion
+	 * handler ???
+	 */
+
+	while (1) {
+		/* Retrieve the first available USB request, protected by the
+		 * request lock.
+		 */
+		spin_lock_irqsave(&video->req_lock, flags);
+		if (list_empty(&video->req_free)) {
+			spin_unlock_irqrestore(&video->req_lock, flags);
+			return 0;
+		}
+		req = list_first_entry(&video->req_free, struct usb_request,
+					list);
+		list_del(&req->list);
+		spin_unlock_irqrestore(&video->req_lock, flags);
+
+		/* Retrieve the first available video buffer and fill the
+		 * request, protected by the video queue irqlock.
+		 */
+		spin_lock_irqsave(&video->queue.irqlock, flags);
+		buf = uvc_queue_head(&video->queue);
+		if (buf == NULL) {
+			spin_unlock_irqrestore(&video->queue.irqlock, flags);
+			break;
+		}
+
+		video->encode(req, video, buf);
+
+		/* Queue the USB request */
+		if ((ret = usb_ep_queue(video->ep, req, GFP_KERNEL)) < 0) {
+			printk(KERN_INFO "Failed to queue request (%d)\n", ret);
+			usb_ep_set_halt(video->ep);
+			spin_unlock_irqrestore(&video->queue.irqlock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&video->queue.irqlock, flags);
+	}
+
+	spin_lock_irqsave(&video->req_lock, flags);
+	list_add_tail(&req->list, &video->req_free);
+	spin_unlock_irqrestore(&video->req_lock, flags);
+	return 0;
+}
+
+/*
+ * Enable or disable the video stream.
+ */
+static int
+uvc_video_enable(struct uvc_video *video, int enable)
+{
+	unsigned int i;
+	int ret;
+
+	if (video->ep == NULL) {
+		printk(KERN_INFO "Video enable failed, device is "
+			"uninitialized.\n");
+		return -ENODEV;
+	}
+
+	if (!enable) {
+		for (i = 0; i < UVC_NUM_REQUESTS; ++i)
+			usb_ep_dequeue(video->ep, video->req[i]);
+
+		uvc_video_free_requests(video);
+		uvc_queue_enable(&video->queue, 0);
+		return 0;
+	}
+
+	if ((ret = uvc_queue_enable(&video->queue, 1)) < 0)
+		return ret;
+
+	if ((ret = uvc_video_alloc_requests(video)) < 0)
+		return ret;
+
+	if (video->max_payload_size) {
+		video->encode = uvc_video_encode_bulk;
+		video->payload_size = 0;
+	} else
+		video->encode = uvc_video_encode_isoc;
+
+	return uvc_video_pump(video);
+}
+
+/*
+ * Initialize the UVC video stream.
+ */
+static int
+uvc_video_init(struct uvc_video *video)
+{
+	INIT_LIST_HEAD(&video->req_free);
+	spin_lock_init(&video->req_lock);
+
+	video->fcc = V4L2_PIX_FMT_YUYV;
+	video->bpp = 16;
+	video->width = 320;
+	video->height = 240;
+	video->imagesize = 320 * 240 * 2;
+
+	/* Initialize the video buffers queue. */
+	uvc_queue_init(&video->queue, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+	return 0;
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/webcam.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/webcam.c
new file mode 100644
index 0000000..668fe12
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/webcam.c
@@ -0,0 +1,399 @@
+/*
+ *	webcam.c -- USB webcam gadget driver
+ *
+ *	Copyright (C) 2009-2010
+ *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/usb/video.h>
+
+#include "f_uvc.h"
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "uvc_queue.c"
+#include "uvc_video.c"
+#include "uvc_v4l2.c"
+#include "f_uvc.c"
+
+/* --------------------------------------------------------------------------
+ * Device descriptor
+ */
+
+#define WEBCAM_VENDOR_ID		0x1d6b	/* Linux Foundation */
+#define WEBCAM_PRODUCT_ID		0x0102	/* Webcam A/V gadget */
+#define WEBCAM_DEVICE_BCD		0x0010	/* 0.10 */
+
+static char webcam_vendor_label[] = "Linux Foundation";
+static char webcam_product_label[] = "Webcam gadget";
+static char webcam_config_label[] = "Video";
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+#define STRING_DESCRIPTION_IDX		2
+
+static struct usb_string webcam_strings[] = {
+	[STRING_MANUFACTURER_IDX].s = webcam_vendor_label,
+	[STRING_PRODUCT_IDX].s = webcam_product_label,
+	[STRING_DESCRIPTION_IDX].s = webcam_config_label,
+	{  }
+};
+
+static struct usb_gadget_strings webcam_stringtab = {
+	.language = 0x0409,	/* en-us */
+	.strings = webcam_strings,
+};
+
+static struct usb_gadget_strings *webcam_device_strings[] = {
+	&webcam_stringtab,
+	NULL,
+};
+
+static struct usb_device_descriptor webcam_device_descriptor = {
+	.bLength		= USB_DT_DEVICE_SIZE,
+	.bDescriptorType	= USB_DT_DEVICE,
+	.bcdUSB			= cpu_to_le16(0x0200),
+	.bDeviceClass		= USB_CLASS_MISC,
+	.bDeviceSubClass	= 0x02,
+	.bDeviceProtocol	= 0x01,
+	.bMaxPacketSize0	= 0, /* dynamic */
+	.idVendor		= cpu_to_le16(WEBCAM_VENDOR_ID),
+	.idProduct		= cpu_to_le16(WEBCAM_PRODUCT_ID),
+	.bcdDevice		= cpu_to_le16(WEBCAM_DEVICE_BCD),
+	.iManufacturer		= 0, /* dynamic */
+	.iProduct		= 0, /* dynamic */
+	.iSerialNumber		= 0, /* dynamic */
+	.bNumConfigurations	= 0, /* dynamic */
+};
+
+DECLARE_UVC_HEADER_DESCRIPTOR(1);
+
+static const struct UVC_HEADER_DESCRIPTOR(1) uvc_control_header = {
+	.bLength		= UVC_DT_HEADER_SIZE(1),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= UVC_VC_HEADER,
+	.bcdUVC			= cpu_to_le16(0x0100),
+	.wTotalLength		= 0, /* dynamic */
+	.dwClockFrequency	= cpu_to_le32(48000000),
+	.bInCollection		= 0, /* dynamic */
+	.baInterfaceNr[0]	= 0, /* dynamic */
+};
+
+static const struct uvc_camera_terminal_descriptor uvc_camera_terminal = {
+	.bLength		= UVC_DT_CAMERA_TERMINAL_SIZE(3),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= UVC_VC_INPUT_TERMINAL,
+	.bTerminalID		= 1,
+	.wTerminalType		= cpu_to_le16(0x0201),
+	.bAssocTerminal		= 0,
+	.iTerminal		= 0,
+	.wObjectiveFocalLengthMin	= cpu_to_le16(0),
+	.wObjectiveFocalLengthMax	= cpu_to_le16(0),
+	.wOcularFocalLength		= cpu_to_le16(0),
+	.bControlSize		= 3,
+	.bmControls[0]		= 2,
+	.bmControls[1]		= 0,
+	.bmControls[2]		= 0,
+};
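bmControls above is a little-endian bitmap, bControlSize bytes long, advertising which camera terminal controls the gadget implements; the value 2 in the first byte sets bit 1, which the UVC specification assigns to the auto-exposure mode control. A minimal sketch of testing a control bit against this descriptor (the helper name is hypothetical and not part of the driver; it only assumes the descriptor types from <linux/usb/video.h> already included above):

/* Check whether camera terminal control 'bit' is advertised in bmControls. */
static inline int ct_control_advertised(const struct uvc_camera_terminal_descriptor *ct,
					unsigned int bit)
{
	if (bit / 8 >= ct->bControlSize)
		return 0;
	return (ct->bmControls[bit / 8] >> (bit % 8)) & 1;
}

/* ct_control_advertised(&uvc_camera_terminal, 1) returns 1 for the descriptor above. */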
+
+static const struct uvc_processing_unit_descriptor uvc_processing = {
+	.bLength		= UVC_DT_PROCESSING_UNIT_SIZE(2),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= UVC_VC_PROCESSING_UNIT,
+	.bUnitID		= 2,
+	.bSourceID		= 1,
+	.wMaxMultiplier		= cpu_to_le16(16*1024),
+	.bControlSize		= 2,
+	.bmControls[0]		= 1,
+	.bmControls[1]		= 0,
+	.iProcessing		= 0,
+};
+
+static const struct uvc_output_terminal_descriptor uvc_output_terminal = {
+	.bLength		= UVC_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= UVC_VC_OUTPUT_TERMINAL,
+	.bTerminalID		= 3,
+	.wTerminalType		= cpu_to_le16(0x0101),
+	.bAssocTerminal		= 0,
+	.bSourceID		= 2,
+	.iTerminal		= 0,
+};
+
+DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(1, 2);
+
+static const struct UVC_INPUT_HEADER_DESCRIPTOR(1, 2) uvc_input_header = {
+	.bLength		= UVC_DT_INPUT_HEADER_SIZE(1, 2),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= UVC_VS_INPUT_HEADER,
+	.bNumFormats		= 2,
+	.wTotalLength		= 0, /* dynamic */
+	.bEndpointAddress	= 0, /* dynamic */
+	.bmInfo			= 0,
+	.bTerminalLink		= 3,
+	.bStillCaptureMethod	= 0,
+	.bTriggerSupport	= 0,
+	.bTriggerUsage		= 0,
+	.bControlSize		= 1,
+	.bmaControls[0][0]	= 0,
+	.bmaControls[1][0]	= 4,
+};
+
+static const struct uvc_format_uncompressed uvc_format_yuv = {
+	.bLength		= UVC_DT_FORMAT_UNCOMPRESSED_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= UVC_VS_FORMAT_UNCOMPRESSED,
+	.bFormatIndex		= 1,
+	.bNumFrameDescriptors	= 2,
+	.guidFormat		=
+		{ 'Y',  'U',  'Y',  '2', 0x00, 0x00, 0x10, 0x00,
+		 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71},
+	.bBitsPerPixel		= 16,
+	.bDefaultFrameIndex	= 1,
+	.bAspectRatioX		= 0,
+	.bAspectRatioY		= 0,
+	.bmInterfaceFlags	= 0,
+	.bCopyProtect		= 0,
+};
+
+DECLARE_UVC_FRAME_UNCOMPRESSED(1);
+DECLARE_UVC_FRAME_UNCOMPRESSED(3);
+
+static const struct UVC_FRAME_UNCOMPRESSED(3) uvc_frame_yuv_360p = {
+	.bLength		= UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= UVC_VS_FRAME_UNCOMPRESSED,
+	.bFrameIndex		= 1,
+	.bmCapabilities		= 0,
+	.wWidth			= cpu_to_le16(640),
+	.wHeight		= cpu_to_le16(360),
+	.dwMinBitRate		= cpu_to_le32(18432000),
+	.dwMaxBitRate		= cpu_to_le32(55296000),
+	.dwMaxVideoFrameBufferSize	= cpu_to_le32(460800),
+	.dwDefaultFrameInterval	= cpu_to_le32(666666),
+	.bFrameIntervalType	= 3,
+	.dwFrameInterval[0]	= cpu_to_le32(666666),
+	.dwFrameInterval[1]	= cpu_to_le32(1000000),
+	.dwFrameInterval[2]	= cpu_to_le32(5000000),
+};
+
+static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = {
+	.bLength		= UVC_DT_FRAME_UNCOMPRESSED_SIZE(1),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= UVC_VS_FRAME_UNCOMPRESSED,
+	.bFrameIndex		= 2,
+	.bmCapabilities		= 0,
+	.wWidth			= cpu_to_le16(1280),
+	.wHeight		= cpu_to_le16(720),
+	.dwMinBitRate		= cpu_to_le32(29491200),
+	.dwMaxBitRate		= cpu_to_le32(29491200),
+	.dwMaxVideoFrameBufferSize	= cpu_to_le32(1843200),
+	.dwDefaultFrameInterval	= cpu_to_le32(5000000),
+	.bFrameIntervalType	= 1,
+	.dwFrameInterval[0]	= cpu_to_le32(5000000),
+};
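The dwFrameInterval and dwDefaultFrameInterval fields in these frame descriptors are expressed in 100 ns units, so 666666 corresponds to roughly 15 frames per second, 1000000 to 10, and 5000000 to 2. A minimal sketch of the conversion (the helper name is hypothetical, not part of this driver):

/* Convert a UVC frame interval (100 ns units) to frames per second. */
static inline unsigned int uvc_interval_to_fps(u32 interval_100ns)
{
	return 10000000 / interval_100ns;	/* 10,000,000 * 100 ns = 1 second */
}

/*
 * uvc_interval_to_fps(666666)  == 15  (default frame interval above)
 * uvc_interval_to_fps(1000000) == 10
 * uvc_interval_to_fps(5000000) ==  2
 */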
+
+static const struct uvc_format_mjpeg uvc_format_mjpg = {
+	.bLength		= UVC_DT_FORMAT_MJPEG_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= UVC_VS_FORMAT_MJPEG,
+	.bFormatIndex		= 2,
+	.bNumFrameDescriptors	= 2,
+	.bmFlags		= 0,
+	.bDefaultFrameIndex	= 1,
+	.bAspectRatioX		= 0,
+	.bAspectRatioY		= 0,
+	.bmInterfaceFlags	= 0,
+	.bCopyProtect		= 0,
+};
+
+DECLARE_UVC_FRAME_MJPEG(1);
+DECLARE_UVC_FRAME_MJPEG(3);
+
+static const struct UVC_FRAME_MJPEG(3) uvc_frame_mjpg_360p = {
+	.bLength		= UVC_DT_FRAME_MJPEG_SIZE(3),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= UVC_VS_FRAME_MJPEG,
+	.bFrameIndex		= 1,
+	.bmCapabilities		= 0,
+	.wWidth			= cpu_to_le16(640),
+	.wHeight		= cpu_to_le16(360),
+	.dwMinBitRate		= cpu_to_le32(18432000),
+	.dwMaxBitRate		= cpu_to_le32(55296000),
+	.dwMaxVideoFrameBufferSize	= cpu_to_le32(460800),
+	.dwDefaultFrameInterval	= cpu_to_le32(666666),
+	.bFrameIntervalType	= 3,
+	.dwFrameInterval[0]	= cpu_to_le32(666666),
+	.dwFrameInterval[1]	= cpu_to_le32(1000000),
+	.dwFrameInterval[2]	= cpu_to_le32(5000000),
+};
+
+static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = {
+	.bLength		= UVC_DT_FRAME_MJPEG_SIZE(1),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= UVC_VS_FRAME_MJPEG,
+	.bFrameIndex		= 2,
+	.bmCapabilities		= 0,
+	.wWidth			= cpu_to_le16(1280),
+	.wHeight		= cpu_to_le16(720),
+	.dwMinBitRate		= cpu_to_le32(29491200),
+	.dwMaxBitRate		= cpu_to_le32(29491200),
+	.dwMaxVideoFrameBufferSize	= cpu_to_le32(1843200),
+	.dwDefaultFrameInterval	= cpu_to_le32(5000000),
+	.bFrameIntervalType	= 1,
+	.dwFrameInterval[0]	= cpu_to_le32(5000000),
+};
+
+static const struct uvc_color_matching_descriptor uvc_color_matching = {
+	.bLength		= UVC_DT_COLOR_MATCHING_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubType	= UVC_VS_COLORFORMAT,
+	.bColorPrimaries	= 1,
+	.bTransferCharacteristics	= 1,
+	.bMatrixCoefficients	= 4,
+};
+
+static const struct uvc_descriptor_header * const uvc_control_cls[] = {
+	(const struct uvc_descriptor_header *) &uvc_control_header,
+	(const struct uvc_descriptor_header *) &uvc_camera_terminal,
+	(const struct uvc_descriptor_header *) &uvc_processing,
+	(const struct uvc_descriptor_header *) &uvc_output_terminal,
+	NULL,
+};
+
+static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
+	(const struct uvc_descriptor_header *) &uvc_input_header,
+	(const struct uvc_descriptor_header *) &uvc_format_yuv,
+	(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
+	(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+	(const struct uvc_descriptor_header *) &uvc_format_mjpg,
+	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
+	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+	(const struct uvc_descriptor_header *) &uvc_color_matching,
+	NULL,
+};
+
+static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
+	(const struct uvc_descriptor_header *) &uvc_input_header,
+	(const struct uvc_descriptor_header *) &uvc_format_yuv,
+	(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
+	(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+	(const struct uvc_descriptor_header *) &uvc_format_mjpg,
+	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
+	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+	(const struct uvc_descriptor_header *) &uvc_color_matching,
+	NULL,
+};
+
+/* --------------------------------------------------------------------------
+ * USB configuration
+ */
+
+static int __init
+webcam_config_bind(struct usb_configuration *c)
+{
+	return uvc_bind_config(c, uvc_control_cls, uvc_fs_streaming_cls,
+			       uvc_hs_streaming_cls);
+}
+
+static struct usb_configuration webcam_config_driver = {
+	.label			= webcam_config_label,
+	.bConfigurationValue	= 1,
+	.iConfiguration		= 0, /* dynamic */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+	.bMaxPower		= CONFIG_USB_GADGET_VBUS_DRAW / 2,
+};
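bMaxPower is encoded in units of 2 mA at full and high speed, which is why the milliamp value from CONFIG_USB_GADGET_VBUS_DRAW is halved here. A worked example, assuming a configured VBUS draw of 500 mA (the macro name below is hypothetical):

#define MA_TO_BMAXPOWER(mA)	((mA) / 2)	/* bMaxPower is in 2 mA units */

/*
 * MA_TO_BMAXPOWER(500) == 250; the host multiplies by 2 and sees a
 * declared maximum draw of 500 mA for this configuration.
 */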
+
+static int /* __init_or_exit */
+webcam_unbind(struct usb_composite_dev *cdev)
+{
+	return 0;
+}
+
+static int __init
+webcam_bind(struct usb_composite_dev *cdev)
+{
+	int ret;
+
+	/* Allocate string descriptor numbers ... note that string contents
+	 * can be overridden by the composite_dev glue.
+	 */
+	if ((ret = usb_string_id(cdev)) < 0)
+		goto error;
+	webcam_strings[STRING_MANUFACTURER_IDX].id = ret;
+	webcam_device_descriptor.iManufacturer = ret;
+
+	if ((ret = usb_string_id(cdev)) < 0)
+		goto error;
+	webcam_strings[STRING_PRODUCT_IDX].id = ret;
+	webcam_device_descriptor.iProduct = ret;
+
+	if ((ret = usb_string_id(cdev)) < 0)
+		goto error;
+	webcam_strings[STRING_DESCRIPTION_IDX].id = ret;
+	webcam_config_driver.iConfiguration = ret;
+
+	/* Register our configuration. */
+	if ((ret = usb_add_config(cdev, &webcam_config_driver,
+					webcam_config_bind)) < 0)
+		goto error;
+
+	INFO(cdev, "Webcam Video Gadget\n");
+	return 0;
+
+error:
+	webcam_unbind(cdev);
+	return ret;
+}
+
+/* --------------------------------------------------------------------------
+ * Driver
+ */
+
+static struct usb_composite_driver webcam_driver = {
+	.name		= "g_webcam",
+	.dev		= &webcam_device_descriptor,
+	.strings	= webcam_device_strings,
+	.max_speed	= USB_SPEED_HIGH,
+	.unbind		= webcam_unbind,
+};
+
+static int __init
+webcam_init(void)
+{
+	return usb_composite_probe(&webcam_driver, webcam_bind);
+}
+
+static void __exit
+webcam_cleanup(void)
+{
+	usb_composite_unregister(&webcam_driver);
+}
+
+module_init(webcam_init);
+module_exit(webcam_cleanup);
+
+MODULE_AUTHOR("Laurent Pinchart");
+MODULE_DESCRIPTION("Webcam Video Gadget");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1.0");
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/zero.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/zero.c
new file mode 100644
index 0000000..31d3483
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/zero.c
@@ -0,0 +1,353 @@
+/*
+ * zero.c -- Gadget Zero, for USB development
+ *
+ * Copyright (C) 2003-2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+
+/*
+ * Gadget Zero only needs two bulk endpoints, and is an example of how you
+ * can write a hardware-agnostic gadget driver running inside a USB device.
+ * Some hardware details are visible, but don't affect most of the driver.
+ *
+ * Use it with the Linux host/master side "usbtest" driver to get a basic
+ * functional test of your device-side usb stack, or with "usb-skeleton".
+ *
+ * It supports two similar configurations.  One sinks whatever the usb host
+ * writes, and in return sources zeroes.  The other loops whatever the host
+ * writes back, so the host can read it.
+ *
+ * Many drivers will only have one configuration, which lets them be much
+ * simpler if they also don't support high speed operation (unlike this
+ * driver, which does).
+ *
+ * Why is *this* driver using two configurations, rather than setting up
+ * two interfaces with different functions?  To help verify that multiple
+ * configuration infrastructure is working correctly; also, so that it can
+ * work with low-capability USB controllers without four bulk endpoints.
+ */
+
+/*
+ * driver assumes self-powered hardware, and
+ * has no way for users to trigger remote wakeup.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+
+#include "g_zero.h"
+#include "gadget_chips.h"
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "f_sourcesink.c"
+#include "f_loopback.c"
+
+/*-------------------------------------------------------------------------*/
+
+#define DRIVER_VERSION		"Cinco de Mayo 2008"
+
+static const char longname[] = "Gadget Zero";
+
+unsigned buflen = 4096;
+module_param(buflen, uint, 0);
+
+/*
+ * Normally the "loopback" configuration is second (index 1) so
+ * it's not the default.  Here's where to change that order, to
+ * work better with hosts where config changes are problematic or with
+ * controllers (like the original SuperH) that only support one config.
+ */
+static bool loopdefault = 0;
+module_param(loopdefault, bool, S_IRUGO|S_IWUSR);
+
+/*-------------------------------------------------------------------------*/
+
+/* Thanks to NetChip Technologies for donating this product ID.
+ *
+ * DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+#ifndef	CONFIG_USB_ZERO_HNPTEST
+#define DRIVER_VENDOR_NUM	0x0525		/* NetChip */
+#define DRIVER_PRODUCT_NUM	0xa4a0		/* Linux-USB "Gadget Zero" */
+#define DEFAULT_AUTORESUME	0
+#else
+#define DRIVER_VENDOR_NUM	0x1a0a		/* OTG test device IDs */
+#define DRIVER_PRODUCT_NUM	0xbadd
+#define DEFAULT_AUTORESUME	5
+#endif
+
+/* If the optional "autoresume" mode is enabled, it provides good
+ * functional coverage for the "USBCV" test harness from USB-IF.
+ * It's always set if OTG mode is enabled.
+ */
+unsigned autoresume = DEFAULT_AUTORESUME;
+module_param(autoresume, uint, S_IRUGO);
+MODULE_PARM_DESC(autoresume, "zero, or seconds before remote wakeup");
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		cpu_to_le16(0x0200),
+	.bDeviceClass =		USB_CLASS_VENDOR_SPEC,
+
+	.idVendor =		cpu_to_le16(DRIVER_VENDOR_NUM),
+	.idProduct =		cpu_to_le16(DRIVER_PRODUCT_NUM),
+	.bNumConfigurations =	2,
+};
+
+#ifdef CONFIG_USB_OTG
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/* REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+#endif
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+#define STRING_SERIAL_IDX		2
+
+static char manufacturer[50];
+
+/* default serial number takes at least two packets */
+static char serial[] = "0123456789.0123456789.0123456789";
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer,
+	[STRING_PRODUCT_IDX].s = longname,
+	[STRING_SERIAL_IDX].s = serial,
+	{  }			/* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+struct usb_request *alloc_ep_req(struct usb_ep *ep)
+{
+	struct usb_request	*req;
+
+	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+	if (req) {
+		req->length = buflen;
+		req->buf = kmalloc(buflen, GFP_ATOMIC);
+		if (!req->buf) {
+			usb_ep_free_request(ep, req);
+			req = NULL;
+		}
+	}
+	return req;
+}
+
+void free_ep_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
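These two helpers are shared by the source/sink and loopback functions pulled in above. A minimal, hypothetical sketch of the usual pattern around them: allocate a request, attach a completion handler, and queue it to the endpoint with usb_ep_queue(); the controller driver then completes it asynchronously.

/* Hypothetical completion handler, not part of this file. */
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* A real handler would normally requeue or consume the data here. */
	if (req->status == -ESHUTDOWN)
		free_ep_req(ep, req);
}

static int example_start_io(struct usb_ep *ep)
{
	struct usb_request	*req = alloc_ep_req(ep);
	int			status;

	if (!req)
		return -ENOMEM;

	req->complete = example_complete;
	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status)
		free_ep_req(ep, req);
	return status;
}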
+
+static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
+{
+	int			value;
+
+	if (ep->driver_data) {
+		value = usb_ep_disable(ep);
+		if (value < 0)
+			DBG(cdev, "disable %s --> %d\n",
+					ep->name, value);
+		ep->driver_data = NULL;
+	}
+}
+
+void disable_endpoints(struct usb_composite_dev *cdev,
+		struct usb_ep *in, struct usb_ep *out)
+{
+	disable_ep(cdev, in);
+	disable_ep(cdev, out);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct timer_list	autoresume_timer;
+
+static void zero_autoresume(unsigned long _c)
+{
+	struct usb_composite_dev	*cdev = (void *)_c;
+	struct usb_gadget		*g = cdev->gadget;
+
+	/* unconfigured devices can't issue wakeups */
+	if (!cdev->config)
+		return;
+
+	/* Normally the host would be woken up for something
+	 * more significant than just a timer firing; likely
+	 * because of some direct user request.
+	 */
+	if (g->speed != USB_SPEED_UNKNOWN) {
+		int status = usb_gadget_wakeup(g);
+		INFO(cdev, "%s --> %d\n", __func__, status);
+	}
+}
+
+static void zero_suspend(struct usb_composite_dev *cdev)
+{
+	if (cdev->gadget->speed == USB_SPEED_UNKNOWN)
+		return;
+
+	if (autoresume) {
+		mod_timer(&autoresume_timer, jiffies + (HZ * autoresume));
+		DBG(cdev, "suspend, wakeup in %d seconds\n", autoresume);
+	} else
+		DBG(cdev, "%s\n", __func__);
+}
+
+static void zero_resume(struct usb_composite_dev *cdev)
+{
+	DBG(cdev, "%s\n", __func__);
+	del_timer(&autoresume_timer);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int __init zero_bind(struct usb_composite_dev *cdev)
+{
+	int			gcnum;
+	struct usb_gadget	*gadget = cdev->gadget;
+	int			id;
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_MANUFACTURER_IDX].id = id;
+	device_desc.iManufacturer = id;
+
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_PRODUCT_IDX].id = id;
+	device_desc.iProduct = id;
+
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_SERIAL_IDX].id = id;
+	device_desc.iSerialNumber = id;
+
+	setup_timer(&autoresume_timer, zero_autoresume, (unsigned long) cdev);
+
+	/* Register primary, then secondary configuration.  Note that
+	 * SH3 only allows one config...
+	 */
+	if (loopdefault) {
+		loopback_add(cdev, autoresume != 0);
+		sourcesink_add(cdev, autoresume != 0);
+	} else {
+		sourcesink_add(cdev, autoresume != 0);
+		loopback_add(cdev, autoresume != 0);
+	}
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
+	else {
+		/* Gadget Zero is so simple (for now, no altsettings) that
+		 * it SHOULD NOT have problems with bulk-capable hardware.
+		 * So just warn about unrecognized controllers -- don't panic.
+		 *
+		 * Things like configuration and altsetting numbering
+		 * can need hardware-specific attention though.
+		 */
+		pr_warning("%s: controller '%s' not recognized\n",
+			longname, gadget->name);
+		device_desc.bcdDevice = cpu_to_le16(0x9999);
+	}
+
+
+	INFO(cdev, "%s, version: " DRIVER_VERSION "\n", longname);
+
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+		init_utsname()->sysname, init_utsname()->release,
+		gadget->name);
+
+	return 0;
+}
+
+static int zero_unbind(struct usb_composite_dev *cdev)
+{
+	del_timer_sync(&autoresume_timer);
+	return 0;
+}
+
+static struct usb_composite_driver zero_driver = {
+	.name		= "zero",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.max_speed	= USB_SPEED_SUPER,
+	.unbind		= zero_unbind,
+	.suspend	= zero_suspend,
+	.resume		= zero_resume,
+};
+
+MODULE_AUTHOR("David Brownell");
+MODULE_LICENSE("GPL");
+
+static int __init init(void)
+{
+	return usb_composite_probe(&zero_driver, zero_bind);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&zero_driver);
+}
+module_exit(cleanup);